/*
 * qemu user cpu loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
#include "qemu/guest-random.h"
#include "semihosting/common-semi.h"
#include "target/arm/syndrome.h"

#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr, ec, fsc, si_code, si_signo;
    abi_long ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_SWI:
            ret = do_syscall(env,
                             env->xregs[8],
                             env->xregs[0],
                             env->xregs[1],
                             env->xregs[2],
                             env->xregs[3],
                             env->xregs[4],
                             env->xregs[5],
                             0, 0);
            if (ret == -QEMU_ERESTARTSYS) {
                env->pc -= 4;
            } else if (ret != -QEMU_ESIGRETURN) {
                env->xregs[0] = ret;
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_UDEF:
            force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc);
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            ec = syn_get_ec(env->exception.syndrome);
            switch (ec) {
            case EC_DATAABORT:
            case EC_INSNABORT:
                /* Both EC have the same format for FSC, or close enough. */
                fsc = extract32(env->exception.syndrome, 0, 6);
                switch (fsc) {
                case 0x04 ... 0x07: /* Translation fault, level {0-3} */
                    si_signo = TARGET_SIGSEGV;
                    si_code = TARGET_SEGV_MAPERR;
                    break;
                case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
                case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
                    si_signo = TARGET_SIGSEGV;
                    si_code = TARGET_SEGV_ACCERR;
                    break;
                case 0x11: /* Synchronous Tag Check Fault */
                    si_signo = TARGET_SIGSEGV;
                    si_code = TARGET_SEGV_MTESERR;
                    break;
                case 0x21: /* Alignment fault */
                    si_signo = TARGET_SIGBUS;
                    si_code = TARGET_BUS_ADRALN;
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case EC_PCALIGNMENT:
                si_signo = TARGET_SIGBUS;
                si_code = TARGET_BUS_ADRALN;
                break;
            default:
                g_assert_not_reached();
            }
            force_sig_fault(si_signo, si_code, env->exception.vaddress);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
            force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
            break;
        case EXCP_SEMIHOST:
            do_common_semihosting(cs);
            env->pc += 4;
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }

        /* Check for MTE asynchronous faults */
        if (unlikely(env->cp15.tfsr_el[0])) {
            env->cp15.tfsr_el[0] = 0;
            force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MTEAERR, 0);
        }

        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         */
        env->exclusive_addr = -1;
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    TaskState *ts = cs->opaque;
    struct image_info *info = ts->info;
    int i;

    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
        fprintf(stderr,
                "The selected ARM CPU does not support 64 bit mode\n");
        exit(EXIT_FAILURE);
    }

    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    }
    env->pc = regs->pc;
    env->xregs[31] = regs->sp;
#if TARGET_BIG_ENDIAN
    env->cp15.sctlr_el[1] |= SCTLR_E0E;
    for (i = 1; i < 4; ++i) {
        env->cp15.sctlr_el[i] |= SCTLR_EE;
    }
    arm_rebuild_hflags(env);
#endif

    if (cpu_isar_feature(aa64_pauth, cpu)) {
        qemu_guest_getrandom_nofail(&env->keys, sizeof(env->keys));
    }

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call. */
    ts->heap_limit = 0;
}