xref: /openbmc/qemu/linux-user/arm/cpu_loop.c (revision 831734cc)
/*
 *  qemu user cpu loop
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu.h"
#include "elf.h"
#include "cpu_loop-common.h"
#include "hw/semihosting/common-semi.h"

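/*
 * Accessors for guest memory used below: code fetches are byte-swapped
 * according to the instruction endianness in effect (bswap_code() /
 * SCTLR.B), while data accesses are swapped according to the current
 * data endianness (arm_cpu_bswap_data()).
 */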
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/* Commpage handling -- there is no commpage for AArch64 */
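
/*
 * On 32-bit Arm the kernel provides "user helpers" at fixed addresses
 * near the top of the address space (see the kernel's
 * Documentation/arm/kernel_user_helpers.txt).  Guest code calls them by
 * branching to addresses such as 0xffff0fc0 (__kernel_cmpxchg);
 * do_kernel_trap() below intercepts those jumps and emulates the helpers.
 */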

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note that SEGVs in kernel helpers are a bit tricky: we can set the
 * data address sensibly, but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
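    /*
     * start_exclusive() quiesces all other vCPUs, so the 64-bit
     * load/compare/store sequence below is atomic with respect to
     * other guest threads until end_exclusive() is called.
     */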
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We report the PC as the helper entry point, which is as good as
       anything; on a real kernel what you get depends on which mode it
       uses. */
    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}

/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr)) {
            val = ~env->regs[0];
        }
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
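    /* Bit 0 of the return address selects Thumb state, as for a bx lr.  */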
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}

static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
{
    /*
     * Return true if this insn is one of the three magic UDF insns
     * which the kernel treats as breakpoint insns.
     */
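    /*
     * Concretely: the A32 pattern 0xe7f001f0 (any condition field), the
     * T16 pattern 0xde01, and the T32 pair 0xf7f0 a000 (seen here with
     * its halfwords swapped).
     */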
    if (!is_thumb) {
        return (opcode & 0x0fffffff) == 0x07f001f0;
    } else {
        /*
         * Note that we get the two halves of the 32-bit T32 insn
         * in the opposite order to the value the kernel uses in
         * its undef_hook struct.
         */
        return ((opcode & 0xffff) == 0xde01) || (opcode == 0xa000f7f0);
    }
}

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;
    uint32_t addr;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                TaskState *ts = cs->opaque;
                uint32_t opcode;
                int rc;

                /* We handle FPU emulation here, as the Linux kernel does. */
                /* Fetch the opcode that caused the trap. */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                /*
                 * The Linux kernel treats some UDF patterns specially
                 * to use as breakpoints (instead of the architectural
                 * bkpt insn). These should trigger a SIGTRAP rather
                 * than SIGILL.
                 */
                if (insn_is_linux_bkpt(opcode, env->thumb)) {
                    goto excp_debug;
                }

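                /*
                 * EmulateAll() runs the NWFPE floating-point emulation for
                 * this opcode.  As used below: 0 means the insn was not
                 * handled (illegal instruction), a negative value encodes
                 * the (negated) softfloat exception flags, and a positive
                 * value means the insn was emulated successfully.
                 */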
                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_errno = 0;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                } else if (rc < 0) { /* FP exception */
                    int arm_fpe = 0;

                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid) {
                        arm_fpe |= BIT_IOC;
                    }
                    if (-rc & float_flag_divbyzero) {
                        arm_fpe |= BIT_DZC;
                    }
                    if (-rc & float_flag_overflow) {
                        arm_fpe |= BIT_OFC;
                    }
                    if (-rc & float_flag_underflow) {
                        arm_fpe |= BIT_UFC;
                    }
                    if (-rc & float_flag_inexact) {
                        arm_fpe |= BIT_IXC;
                    }

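                    /*
                     * FPA FPSR layout: the cumulative exception flags live
                     * in bits [4:0] and the matching trap-enable bits in
                     * bits [20:16], hence the "<< 16" test below.
                     */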
                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;
                        info.si_errno = 0;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                    } else {
                        env->regs[15] += 4;
                    }

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC)) {
                        fpsr |= BIT_IXC;
                    }
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC)) {
                        fpsr |= BIT_UFC;
                    }
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC)) {
                        fpsr |= BIT_OFC;
                    }
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC)) {
                        fpsr |= BIT_DZC;
                    }
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC)) {
                        fpsr |= BIT_IOC;
                    }
                    ts->fpa.fpsr = fpsr;
                } else { /* everything OK */
                    /* increment PC */
                    env->regs[15] += 4;
                }
            }
            break;
        case EXCP_SWI:
            {
                env->eabi = 1;
                /* system call */
                if (env->thumb) {
                    /* Thumb is always EABI style with syscall number in r7 */
                    n = env->regs[7];
                } else {
                    /*
                     * Equivalent of kernel CONFIG_OABI_COMPAT: read the
                     * Arm SVC insn to extract the immediate, which is the
                     * syscall number in OABI.
                     */
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4, env);
                    n = insn & 0xffffff;
                    if (n == 0) {
                        /* zero immediate: EABI, syscall number in r7 */
                        n = env->regs[7];
                    } else {
                        /*
                         * This XOR matches the kernel code: an immediate
                         * in the valid range (0x900000 .. 0x9fffff) is
                         * converted into the correct EABI-style syscall
                         * number; invalid immediates end up as values
                         * > 0xfffff and are handled below as out-of-range.
                         */
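                        /*
                         * For example, the OABI encoding "swi 0x900004"
                         * (write) yields 0x900004 ^ 0x900000 = 4, while an
                         * out-of-range immediate such as 0x123456 becomes
                         * 0x823456 and is rejected below.
                         */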
                        n ^= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
                }

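                /*
                 * Numbers above ARM_NR_BASE are the Arm-private syscalls
                 * (cacheflush, TLS handling, the breakpoint trap);
                 * everything else is passed to the generic syscall layer.
                 */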
                if (n > ARM_NR_BASE) {
                    switch (n) {
                    case ARM_NR_cacheflush:
                        /* nop */
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                        break;
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        goto excp_debug;
                    case ARM_NR_get_tls:
                        env->regs[0] = cpu_get_tls(env);
                        break;
                    default:
                        if (n < 0xf0800) {
                            /*
                             * Syscalls 0xf0000..0xf07ff (or 0x9f0000..
                             * 0x9f07ff in OABI numbering) are defined
                             * to return -ENOSYS rather than raising
                             * SIGILL. Note that we have already
                             * removed the 0x900000 prefix.
                             */
                            qemu_log_mask(LOG_UNIMP,
                                          "qemu: Unsupported ARM syscall: 0x%x\n",
                                          n);
                            env->regs[0] = -TARGET_ENOSYS;
                        } else {
                            /*
                             * Otherwise SIGILL. This includes any SWI with
                             * immediate not originally 0x9fxxxx, because
                             * of the earlier XOR.
                             */
                            info.si_signo = TARGET_SIGILL;
                            info.si_errno = 0;
                            info.si_code = TARGET_ILL_ILLTRP;
                            info._sifields._sigfault._addr = env->regs[15];
                            if (env->thumb) {
                                info._sifields._sigfault._addr -= 2;
                            } else {
                                info._sifields._sigfault._addr -= 4;
                            }
                            queue_signal(env, info.si_signo,
                                         QEMU_SI_FAULT, &info);
                        }
                        break;
                    }
                } else {
                    ret = do_syscall(env,
                                     n,
                                     env->regs[0],
                                     env->regs[1],
                                     env->regs[2],
                                     env->regs[3],
                                     env->regs[4],
                                     env->regs[5],
                                     0, 0);
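                    /*
                     * -TARGET_ERESTARTSYS: restart the syscall by backing
                     * the PC up to the SWI insn so it is re-executed once
                     * pending signals have been handled.
                     * -TARGET_QEMU_ESIGRETURN: sigreturn has already
                     * restored the register state, so r0 must be left
                     * untouched.
                     */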
                    if (ret == -TARGET_ERESTARTSYS) {
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                        env->regs[0] = ret;
                    }
                }
            }
            break;
        case EXCP_SEMIHOST:
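            /*
             * Common semihosting call: the result is returned in r0 and
             * the PC is stepped past the trapping instruction (2 bytes in
             * Thumb state, 4 in Arm state).
             */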
            env->regs[0] = do_common_semihosting(cs);
            env->regs[15] += env->thumb ? 2 : 4;
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = addr;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
        excp_debug:
            info.si_signo = TARGET_SIGTRAP;
            info.si_errno = 0;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env)) {
                goto error;
            }
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
                      trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct image_info *info = ts->info;
    int i;

    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#ifdef TARGET_WORDS_BIGENDIAN
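    /*
     * Pick the data endianness for a big-endian build: a BE8 binary
     * (EABI >= 4 with the EF_ARM_BE8 ELF flag) executes little-endian
     * instructions with big-endian data, so set CPSR.E and SCTLR.E0E;
     * anything else is treated as legacy BE32 and selected via SCTLR.B.
     */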
    /* Enable BE8.  */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
    arm_rebuild_hflags(env);
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
    ts->heap_limit = 0;
}