/*
 * TCG CPU-specific operations
 *
 * Copyright 2021 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TCG_CPU_OPS_H
#define TCG_CPU_OPS_H

#include "exec/breakpoint.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/mmu-access-type.h"
#include "exec/vaddr.h"

struct TCGCPUOps {
    /**
     * @initialize: Initialize TCG state
     *
     * Called when the first CPU is realized.
     */
    void (*initialize)(void);
    /**
     * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
     *
     * This is called when we abandon execution of a TB before starting it,
     * and must set all parts of the CPU state which the previous TB in the
     * chain may not have updated.
     * By default, when this is NULL, a call is made to @set_pc(tb->pc).
     *
     * If more state needs to be restored, the target must implement a
     * function to restore all the state, and register it here.
     */
    void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
    /**
     * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
     *
     * This is called when we unwind state in the middle of a TB,
     * usually before raising an exception. Set all parts of the CPU
     * state which are tracked insn-by-insn in the target-specific
     * arguments to start_insn, passed as @data.
     */
    void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
                                 const uint64_t *data);

    /** @cpu_exec_enter: Callback for cpu_exec preparation */
    void (*cpu_exec_enter)(CPUState *cpu);
    /** @cpu_exec_exit: Callback for cpu_exec cleanup */
    void (*cpu_exec_exit)(CPUState *cpu);
    /** @debug_excp_handler: Callback for handling debug exceptions */
    void (*debug_excp_handler)(CPUState *cpu);

#ifdef CONFIG_USER_ONLY
    /**
     * @fake_user_interrupt: Callback for 'fake exception' handling.
     *
     * Simulate 'fake exception' which will be handled outside the
     * cpu execution loop (hack for x86 user mode).
     */
    void (*fake_user_interrupt)(CPUState *cpu);

    /**
     * record_sigsegv:
     * @cpu: cpu context
     * @addr: faulting guest address
     * @access_type: access was read/write/execute
     * @maperr: true for invalid page, false for permission fault
     * @ra: host pc for unwinding
     *
     * We are about to raise SIGSEGV with si_code set for @maperr,
     * and si_addr set for @addr. Record anything further needed
     * for the signal ucontext_t.
     *
     * If the emulated kernel does not provide the signal handler with
     * anything besides the user context registers and the siginfo_t,
     * then this hook need do nothing and may be omitted.
     * Otherwise, record the data and return; the caller will raise
     * the signal, unwind the cpu state, and return to the main loop.
     *
     * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
     * so that a "normal" cpu exception can be raised. In this case,
     * the signal must be raised by the architecture cpu_loop.
     */
    void (*record_sigsegv)(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type,
                           bool maperr, uintptr_t ra);
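
    /*
     * For illustration, a minimal sketch of a record_sigsegv hook for a
     * hypothetical "mycpu" target that takes the @ra unwind path and
     * leaves signal delivery to its cpu_loop. CPUMyCPUState, badaddr
     * and EXCP_FAULT are illustrative names, not part of this header:
     *
     *     static void mycpu_record_sigsegv(CPUState *cs, vaddr addr,
     *                                      MMUAccessType access_type,
     *                                      bool maperr, uintptr_t ra)
     *     {
     *         CPUMyCPUState *env = cpu_env(cs);
     *
     *         env->badaddr = addr;              // visible to the cpu_loop
     *         cs->exception_index = EXCP_FAULT; // a target fault exception
     *         cpu_loop_exit_restore(cs, ra);    // unwind and exit cpu_exec
     *     }
     *
     * The architecture cpu_loop then raises SIGSEGV itself, as
     * described above.
     */
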
    /**
     * record_sigbus:
     * @cpu: cpu context
     * @addr: misaligned guest address
     * @access_type: access was read/write/execute
     * @ra: host pc for unwinding
     *
     * We are about to raise SIGBUS with si_code BUS_ADRALN,
     * and si_addr set for @addr. Record anything further needed
     * for the signal ucontext_t.
     *
     * If the emulated kernel does not provide the signal handler with
     * anything besides the user context registers and the siginfo_t,
     * then this hook need do nothing and may be omitted.
     * Otherwise, record the data and return; the caller will raise
     * the signal, unwind the cpu state, and return to the main loop.
     *
     * If it is simpler to re-use the sysemu do_unaligned_access code,
     * @ra is provided so that a "normal" cpu exception can be raised.
     * In this case, the signal must be raised by the architecture cpu_loop.
     */
    void (*record_sigbus)(CPUState *cpu, vaddr addr,
                          MMUAccessType access_type, uintptr_t ra);
#else
    /** @do_interrupt: Callback for interrupt handling. */
    void (*do_interrupt)(CPUState *cpu);
    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
    /**
     * @cpu_exec_halt: Callback for handling halt in cpu_exec.
     *
     * The target CPU should do any special processing here that it needs
     * to do when the CPU is in the halted state.
     *
     * Return true to indicate that the CPU should now leave halt, false
     * if it should remain in the halted state. (This should generally
     * be the same value that cpu_has_work() would return.)
     *
     * This method must be provided. If the target does not need to
     * do anything special for halt, the same function used for its
     * CPUClass::has_work method can be used here, as they have the
     * same function signature.
     */
    bool (*cpu_exec_halt)(CPUState *cpu);
    /**
     * @tlb_fill_align: Handle a softmmu tlb miss
     * @cpu: cpu context
     * @out: output page properties
     * @addr: virtual address
     * @access_type: read, write or execute
     * @mmu_idx: mmu context
     * @memop: memory operation for the access
     * @size: memory access size, or 0 for whole page
     * @probe: test only, no fault
     * @ra: host return address for exception unwind
     *
     * If the access is valid, fill in @out and return true.
     * Otherwise if probe is true, return false.
     * Otherwise raise an exception and do not return.
     *
     * The alignment check for the access is deferred to this hook,
     * so that the target can determine the priority of any alignment
     * fault with respect to other potential faults from paging.
     * Zero may be passed for @memop to skip any alignment check
     * for non-memory-access operations such as probing.
     */
    bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
                           MMUAccessType access_type, int mmu_idx,
                           MemOp memop, int size, bool probe, uintptr_t ra);
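
    /*
     * For illustration, a minimal sketch of a tlb_fill_align hook for a
     * hypothetical "mycpu" target with a flat identity mapping. It assumes
     * the memop_alignment_bits() helper from "exec/memop.h" is available;
     * mycpu_raise_alignment_fault() is an illustrative, noreturn name:
     *
     *     static bool mycpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
     *                                      vaddr addr, MMUAccessType type,
     *                                      int mmu_idx, MemOp memop,
     *                                      int size, bool probe, uintptr_t ra)
     *     {
     *         // Alignment is checked here, not by the generic code;
     *         // probe callers pass memop == 0, which skips this check.
     *         if (addr & ((1 << memop_alignment_bits(memop)) - 1)) {
     *             mycpu_raise_alignment_fault(cs, addr, type, mmu_idx, ra);
     *         }
     *         // Identity-map the page with full permissions.
     *         memset(out, 0, sizeof(*out));
     *         out->phys_addr = addr;
     *         out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
     *         out->lg_page_size = TARGET_PAGE_BITS;
     *         out->attrs = MEMTXATTRS_UNSPECIFIED;
     *         return true;
     *     }
     */
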
    /**
     * @tlb_fill: Handle a softmmu tlb miss
     *
     * If the access is valid, call tlb_set_page and return true;
     * if the access is invalid and probe is true, return false;
     * otherwise raise an exception and do not return.
     */
    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);
    /**
     * @do_transaction_failed: Callback for handling failed memory transactions
     * (i.e. bus faults or external aborts; not MMU faults)
     */
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    /**
     * @do_unaligned_access: Callback for unaligned access handling
     * The callback must exit via raising an exception.
     */
    G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                           MMUAccessType access_type,
                                           int mmu_idx, uintptr_t retaddr);

    /**
     * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
     */
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);

    /**
     * @debug_check_watchpoint: return true if the architectural
     * watchpoint whose address has matched should really fire, used by ARM
     * and RISC-V
     */
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);

    /**
     * @debug_check_breakpoint: return true if the architectural
     * breakpoint whose PC has matched should really fire.
     */
    bool (*debug_check_breakpoint)(CPUState *cpu);

    /**
     * @io_recompile_replay_branch: Callback for cpu_io_recompile.
     *
     * The cpu has been stopped, and cpu_restore_state_from_tb has been
     * called. If the faulting instruction is in a delay slot, and the
     * target architecture requires re-execution of the branch, then
     * adjust the cpu state as required and return true.
     */
    bool (*io_recompile_replay_branch)(CPUState *cpu,
                                       const TranslationBlock *tb);
    /**
     * @need_replay_interrupt: Return %true if @interrupt_request
     * needs to be recorded for replay purposes.
     */
    bool (*need_replay_interrupt)(int interrupt_request);
#endif /* !CONFIG_USER_ONLY */
};

#if defined(CONFIG_USER_ONLY)

static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                                        MemTxAttrs atr, int fl, uintptr_t ra)
{
}

static inline int cpu_watchpoint_address_matches(CPUState *cpu,
                                                 vaddr addr, vaddr len)
{
    return 0;
}

#else

/**
 * cpu_check_watchpoint:
 * @cpu: cpu context
 * @addr: guest virtual address
 * @len: access length
 * @attrs: memory access attributes
 * @flags: watchpoint access type
 * @ra: unwind return address
 *
 * Check for a watchpoint hit in [addr, addr+len) of the type
 * specified by @flags. On a hit, exit via exception.
 */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra);

/**
 * cpu_watchpoint_address_matches:
 * @cpu: cpu context
 * @addr: guest virtual address
 * @len: access length
 *
 * Return the watchpoint flags that apply to [addr, addr+len).
 * If no watchpoint is registered for the range, the result is 0.
 */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);

#endif

#endif /* TCG_CPU_OPS_H */
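
/*
 * For illustration, how a target typically wires these hooks up: a sketch
 * for a hypothetical "mycpu" target, with all mycpu_* names illustrative
 * and the CONFIG_USER_ONLY guards around sysemu-only members omitted for
 * brevity. Note how the CPUClass::has_work function can double as
 * @cpu_exec_halt, as described above:
 *
 *     static const TCGCPUOps mycpu_tcg_ops = {
 *         .initialize = mycpu_translate_init,
 *         .restore_state_to_opc = mycpu_restore_state_to_opc,
 *         .cpu_exec_interrupt = mycpu_cpu_exec_interrupt,
 *         .cpu_exec_halt = mycpu_cpu_has_work,
 *         .tlb_fill = mycpu_cpu_tlb_fill,
 *         .do_interrupt = mycpu_cpu_do_interrupt,
 *     };
 *
 *     static void mycpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->has_work = mycpu_cpu_has_work;
 *         cc->tcg_ops = &mycpu_tcg_ops;
 *     }
 */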