/*
 *  qemu user cpu loop
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "elf.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
#include "semihosting/common-semi.h"
#include "target/arm/syndrome.h"

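/*
 * Guest access helpers: the "code" accessors byte-swap according to
 * SCTLR.B (instruction endianness), while the "data" accessors follow
 * the CPU's current data endianness.
 */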
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/*
 * Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
 * Must be called with mmap_lock.
 * We get the PC of the entry address - which is as good as anything;
 * on a real kernel, what you get depends on which mode it uses.
 */
static void *atomic_mmu_lookup(CPUArchState *env, uint32_t addr, int size)
{
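    /*
     * PAGE_WRITE_ORG: writable as originally mapped, even if QEMU has
     * temporarily write-protected the page to catch self-modifying code.
     */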
    int need_flags = PAGE_READ | PAGE_WRITE_ORG | PAGE_VALID;
    int page_flags;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & (size - 1))) {
        force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
        return NULL;
    }

    page_flags = page_get_flags(addr);
    if (unlikely((page_flags & need_flags) != need_flags)) {
        force_sig_fault(TARGET_SIGSEGV,
                        page_flags & PAGE_VALID ?
                        TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
        return NULL;
    }

    return g2h(env_cpu(env), addr);
}

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
 * Input:
 * r0 = oldval
 * r1 = newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 */
static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
{
    uint32_t oldval, newval, val, addr, cpsr, *host_addr;

    /* Swap if host != guest endianness, for the host cmpxchg below */
    oldval = tswap32(env->regs[0]);
    newval = tswap32(env->regs[1]);
    addr = env->regs[2];

    mmap_lock();
    host_addr = atomic_mmu_lookup(env, addr, 4);
    if (!host_addr) {
        mmap_unlock();
        return;
    }

    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
    mmap_unlock();

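    /*
     * qatomic_cmpxchg returns the value that was read back, so the
     * exchange succeeded iff val == oldval; encode that as the C flag
     * and as r0 = 0 (success) or -1 (failure), per the helper ABI above.
     */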
    cpsr = (val == oldval) * CPSR_C;
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    env->regs[0] = cpsr ? 0 : -1;
}

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note that SEGVs in kernel helpers are a bit tricky: we can set the
 * data address sensibly, but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    uint64_t *host_addr;

    addr = env->regs[0];
    if (get_user_u64(oldval, addr)) {
        goto segv;
    }

    addr = env->regs[1];
    if (get_user_u64(newval, addr)) {
        goto segv;
    }

    mmap_lock();
    addr = env->regs[2];
    host_addr = atomic_mmu_lookup(env, addr, 8);
    if (!host_addr) {
        mmap_unlock();
        return;
    }

    /* Swap if host != guest endianness, for the host cmpxchg below */
    oldval = tswap64(oldval);
    newval = tswap64(newval);

#ifdef CONFIG_ATOMIC64
    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
    cpsr = (val == oldval) * CPSR_C;
#else
    /*
     * This only works between threads, not between processes, but since
     * the host has no 64-bit cmpxchg, it is the best that we can do.
     */
    start_exclusive();
    val = *host_addr;
    if (val == oldval) {
        *host_addr = newval;
        cpsr = CPSR_C;
    } else {
        cpsr = 0;
    }
    end_exclusive();
#endif
    mmap_unlock();

    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    env->regs[0] = cpsr ? 0 : -1;
    return;

 segv:
    force_sig_fault(TARGET_SIGSEGV,
                    page_get_flags(addr) & PAGE_VALID ?
                    TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
}

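/*
 * Guest code invokes these helpers by calling fixed addresses in the
 * high vectors page directly. As an illustration only (see the kernel's
 * Documentation/arm/kernel_user_helpers.rst for the authoritative
 * examples), a cmpxchg-based retry loop looks roughly like:
 *
 *   typedef int (kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 *   #define __kernel_cmpxchg (*(kernel_cmpxchg_t *)0xffff0fc0)
 *
 *   do {
 *       old = *ptr;
 *   } while (__kernel_cmpxchg(old, old + 1, ptr));
 */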
/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        smp_mb();
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        arm_kernel_cmpxchg32_helper(env);
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
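    /* Bit 0 of lr selects the return instruction set, as a BX lr would. */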
    if (addr & 1) {
        env->thumb = true;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}

static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
{
    /*
     * Return true if this insn is one of the three magic UDF insns
     * which the kernel treats as breakpoint insns.
     */
    if (!is_thumb) {
        return (opcode & 0x0fffffff) == 0x07f001f0;
    } else {
        /*
         * Note that we get the two halves of the 32-bit T32 insn
         * in the opposite order to the value the kernel uses in
         * its undef_hook struct.
         */
        return ((opcode & 0xffff) == 0xde01) || (opcode == 0xa000f7f0);
    }
}

static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
{
    TaskState *ts = env_cpu(env)->opaque;
    int rc = EmulateAll(opcode, &ts->fpa, env);
    int raise, enabled;

    if (rc == 0) {
        /* Illegal instruction */
        return false;
    }
    if (rc > 0) {
        /* Everything ok. */
        env->regs[15] += 4;
        return true;
    }

    /* FP exception */
    rc = -rc;
    raise = 0;

    /* Translate softfloat flags to FPSR flags */
    if (rc & float_flag_invalid) {
        raise |= BIT_IOC;
    }
    if (rc & float_flag_divbyzero) {
        raise |= BIT_DZC;
    }
    if (rc & float_flag_overflow) {
        raise |= BIT_OFC;
    }
    if (rc & float_flag_underflow) {
        raise |= BIT_UFC;
    }
    if (rc & float_flag_inexact) {
        raise |= BIT_IXC;
    }

    /* Accumulate unenabled exceptions */
    enabled = ts->fpa.fpsr >> 16;
    ts->fpa.fpsr |= raise & ~enabled;

    if (raise & enabled) {
        /*
         * The kernel's nwfpe emulator does not pass a real si_code.
         * It merely uses send_sig(SIGFPE, current, 1), which results in
         * __send_signal() filling out SI_KERNEL with pid and uid 0 (under
         * the "SEND_SIG_PRIV" case). That's what our force_sig() does.
         */
        force_sig(TARGET_SIGFPE);
    } else {
        env->regs[15] += 4;
    }
    return true;
}

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr, si_signo, si_code;
    unsigned int n, insn;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                uint32_t opcode;

                /*
                 * We handle the FPU emulation here, as the Linux kernel
                 * does: fetch the faulting opcode and try the FPA11
                 * emulator below before raising SIGILL.
                 */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                /*
                 * The Linux kernel treats some UDF patterns specially
                 * to use as breakpoints (instead of the architectural
                 * bkpt insn). These should trigger a SIGTRAP rather
                 * than SIGILL.
                 */
                if (insn_is_linux_bkpt(opcode, env->thumb)) {
                    goto excp_debug;
                }

                if (!env->thumb && emulate_arm_fpa11(env, opcode)) {
                    break;
                }

                force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN,
                                env->regs[15]);
            }
            break;
        case EXCP_SWI:
            {
                env->eabi = true;
                /* system call */
                if (env->thumb) {
                    /* Thumb is always EABI style with syscall number in r7 */
                    n = env->regs[7];
                } else {
                    /*
                     * Equivalent of kernel CONFIG_OABI_COMPAT: read the
                     * Arm SVC insn to extract the immediate, which is the
                     * syscall number in OABI.
                     */
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4, env);
                    n = insn & 0xffffff;
                    if (n == 0) {
                        /* zero immediate: EABI, syscall number in r7 */
                        n = env->regs[7];
                    } else {
                        /*
                         * This XOR matches the kernel code: an immediate
                         * in the valid range (0x900000 .. 0x9fffff) is
                         * converted into the correct EABI-style syscall
                         * number; invalid immediates end up as values
                         * > 0xfffff and are handled below as out-of-range.
                         */
                        n ^= ARM_SYSCALL_BASE;
                        env->eabi = false;
                    }
                }

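                /*
                 * Numbers above ARM_NR_BASE are ARM-private syscalls
                 * (the 0x9f0000 range in OABI encoding); they are handled
                 * here rather than passed on to do_syscall().
                 */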
                if (n > ARM_NR_BASE) {
                    switch (n) {
                    case ARM_NR_cacheflush:
                        /* nop */
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                        break;
                    case ARM_NR_breakpoint:
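                        /*
                         * Wind the PC back so the SIGTRAP is reported at
                         * the address of the SWI insn itself.
                         */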
                        env->regs[15] -= env->thumb ? 2 : 4;
                        goto excp_debug;
                    case ARM_NR_get_tls:
                        env->regs[0] = cpu_get_tls(env);
                        break;
                    default:
                        if (n < 0xf0800) {
                            /*
                             * Syscalls 0xf0000..0xf07ff (or 0x9f0000..
                             * 0x9f07ff in OABI numbering) are defined
                             * to return -ENOSYS rather than raising
                             * SIGILL. Note that we have already
                             * removed the 0x900000 prefix.
                             */
                            qemu_log_mask(LOG_UNIMP,
                                "qemu: Unsupported ARM syscall: 0x%x\n",
                                          n);
                            env->regs[0] = -TARGET_ENOSYS;
                        } else {
                            /*
                             * Otherwise SIGILL. This includes any SWI with
                             * immediate not originally 0x9fxxxx, because
                             * of the earlier XOR.
                             * Like the real kernel, we report the addr of the
                             * SWI in the siginfo si_addr but leave the PC
                             * pointing at the insn after the SWI.
                             */
                            abi_ulong faultaddr = env->regs[15];
                            faultaddr -= env->thumb ? 2 : 4;
                            force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP,
                                            faultaddr);
                        }
                        break;
                    }
                } else {
                    ret = do_syscall(env,
                                     n,
                                     env->regs[0],
                                     env->regs[1],
                                     env->regs[2],
                                     env->regs[3],
                                     env->regs[4],
                                     env->regs[5],
                                     0, 0);
                    if (ret == -QEMU_ERESTARTSYS) {
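                        /*
                         * Restartable syscall: step the PC back to the
                         * SWI so it is re-executed after the signal is
                         * delivered.
                         */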
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -QEMU_ESIGRETURN) {
                        env->regs[0] = ret;
                    }
                }
            }
            break;
        case EXCP_SEMIHOST:
            do_common_semihosting(cs);
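            /* Step the PC past the insn that raised the semihosting trap. */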
            env->regs[15] += env->thumb ? 2 : 4;
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            /* For user-only we don't set TTBCR_EAE, so look at the FSR. */
            switch (env->exception.fsr & 0x1f) {
            case 0x1: /* Alignment */
                si_signo = TARGET_SIGBUS;
                si_code = TARGET_BUS_ADRALN;
                break;
            case 0x3: /* Access flag fault, level 1 */
            case 0x6: /* Access flag fault, level 2 */
            case 0x9: /* Domain fault, level 1 */
            case 0xb: /* Domain fault, level 2 */
            case 0xd: /* Permission fault, level 1 */
            case 0xf: /* Permission fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_ACCERR;
                break;
            case 0x5: /* Translation fault, level 1 */
            case 0x7: /* Translation fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_MAPERR;
                break;
            default:
                g_assert_not_reached();
            }
            force_sig_fault(si_signo, si_code, env->exception.vaddress);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
        excp_debug:
            force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[15]);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env)) {
                goto error;
            }
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct image_info *info = ts->info;
    int i;

    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#if TARGET_BIG_ENDIAN
    /* Enable BE8.  */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
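        /* Not BE8: legacy big-endian binaries use SCTLR.B (BE32-style). */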
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
    arm_rebuild_hflags(env);
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
    ts->heap_limit = 0;
}