/*
 * Copyright 2008 IBM Corporation.
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the GNU GPL license version 2 or later.
 *
 */

#ifndef KVM_PPC_H
#define KVM_PPC_H

#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")

#ifdef CONFIG_KVM

uint32_t kvmppc_get_tbfreq(void);
uint64_t kvmppc_get_clockfreq(void);
bool kvmppc_get_host_model(char **buf);
bool kvmppc_get_host_serial(char **buf);
int kvmppc_get_hasidle(CPUPPCState *env);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
int kvmppc_smt_threads(void);
void kvmppc_hint_smt_possible(Error **errp);
int kvmppc_set_smt_threads(int smt);
int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_set_tcr(PowerPCCPU *cpu);
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl);
#ifndef CONFIG_USER_ONLY
off_t kvmppc_alloc_rma(void **rma);
bool kvmppc_spapr_use_multitce(void);
int kvmppc_spapr_enable_inkernel_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift);
#endif /* !CONFIG_USER_ONLY */
bool kvmppc_has_cap_epr(void);
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid);
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
bool kvmppc_has_cap_htm(void);
bool kvmppc_has_cap_mmu_radix(void);
bool kvmppc_has_cap_mmu_hash_v3(void);
int kvmppc_get_cap_safe_cache(void);
int kvmppc_get_cap_safe_bounds_check(void);
int kvmppc_get_cap_safe_indirect_branch(void);
int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
void kvmppc_check_papr_resize_hpt(Error **errp);
int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift);
int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift);
bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);

bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path);

#else

static inline uint32_t kvmppc_get_tbfreq(void)
{
    return 0;
}

static inline bool kvmppc_get_host_model(char **buf)
{
    return false;
}

static inline bool kvmppc_get_host_serial(char **buf)
{
    return false;
}

static inline uint64_t kvmppc_get_clockfreq(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_vmx(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_dfp(void)
{
    return 0;
}

static inline int kvmppc_get_hasidle(CPUPPCState *env)
{
    return 0;
}

static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    return -1;
}

static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    return -1;
}

static inline void kvmppc_enable_logical_ci_hcalls(void)
{
}

static inline void kvmppc_enable_set_mode_hcall(void)
{
}

static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
{
}

static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}

static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return 0;
}

static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
}

static inline int kvmppc_smt_threads(void)
{
    return 1;
}

static inline void kvmppc_hint_smt_possible(Error **errp)
{
    return;
}

static inline int kvmppc_set_smt_threads(int smt)
{
    return 0;
}

static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    return 0;
}

static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    return -1;
}

static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                                   bool radix, bool gtse,
                                                   uint64_t proc_tbl)
{
    return 0;
}

#ifndef CONFIG_USER_ONLY
static inline off_t kvmppc_alloc_rma(void **rma)
{
    return 0;
}

static inline bool kvmppc_spapr_use_multitce(void)
{
    return false;
}

static inline int kvmppc_spapr_enable_inkernel_multitce(void)
{
    return -1;
}

static inline void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                                            uint64_t bus_offset,
                                            uint32_t nb_table,
                                            int *pfd, bool need_vfio)
{
    return NULL;
}

static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
                                          uint32_t nb_table)
{
    return -1;
}

static inline int kvmppc_reset_htab(int shift_hint)
{
    return 0;
}

static inline uint64_t kvmppc_rma_size(uint64_t current_size,
                                       unsigned int hash_shift)
{
    return ram_size;
}

static inline bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
{
    return true;
}

#endif /* !CONFIG_USER_ONLY */

static inline bool kvmppc_has_cap_epr(void)
{
    return false;
}

static inline int kvmppc_define_rtas_kernel_token(uint32_t token,
                                                  const char *function)
{
    return -1;
}

static inline int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    return -1;
}

static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize,
                                   int64_t max_ns)
{
    abort();
}

static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                                         uint16_t n_valid, uint16_t n_invalid)
{
    abort();
}

static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
                                     hwaddr ptex, int n)
{
    abort();
}

static inline void
kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    abort();
}

static inline bool kvmppc_has_cap_fixup_hcalls(void)
{
    abort();
}

static inline bool kvmppc_has_cap_htm(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_radix(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return false;
}

static inline int kvmppc_get_cap_safe_cache(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_bounds_check(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_indirect_branch(void)
{
    return 0;
}

static inline int kvmppc_enable_hwrng(void)
{
    return -1;
}

static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    abort();
}

static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
    return NULL;
}

static inline void kvmppc_check_papr_resize_hpt(Error **errp)
{
    return;
}

static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu,
                                            target_ulong flags, int shift)
{
    return -ENOSYS;
}

static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu,
                                           target_ulong flags, int shift)
{
    return -ENOSYS;
}

#endif

#ifndef CONFIG_KVM

#define kvmppc_eieio() do { } while (0)

static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

#else /* CONFIG_KVM */

#define kvmppc_eieio()                            \
    do {                                          \
        if (kvm_enabled()) {                      \
            asm volatile("eieio" : : : "memory"); \
        }                                         \
    } while (0)

/* Store data cache blocks back to memory */
static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
        asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
    }
}

/* Invalidate instruction cache blocks */
static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
        asm volatile("icbi 0,%0" : : "r"(p));
    }
}

#endif /* CONFIG_KVM */

#endif /* KVM_PPC_H */
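
/*
 * Illustrative usage sketch (not part of this header's API): the cache
 * maintenance helpers above are typically used together after copying
 * executable code into guest memory, so that dirty data cache lines are
 * written back before the corresponding instruction cache lines are
 * invalidated. The variables in this sketch (cpu, dest, code, code_len)
 * are hypothetical.
 *
 *     memcpy(dest, code, code_len);
 *     kvmppc_dcbst_range(cpu, dest, code_len);
 *     kvmppc_eieio();
 *     kvmppc_icbi_range(cpu, dest, code_len);
 */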