xref: /openbmc/qemu/linux-user/arm/cpu_loop.c (revision 5ade579b)
/*
 *  qemu user cpu loop
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu.h"
#include "elf.h"
#include "cpu_loop-common.h"

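/*
 * Wrappers around get_user()/put_user() for guest code and data
 * accesses.  A big-endian guest may be BE8 (little-endian code,
 * big-endian data) or BE32 (SCTLR.B set, everything big-endian), so
 * instruction fetches and data accesses may each need an extra byte
 * swap; the wrappers check the CPU's current code/data endianness
 * before swapping.
 */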
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note that SEGVs in kernel helpers are a bit tricky: we can set the
 * data address sensibly, but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32-bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We report the helper's entry point as the PC, which is as good as
       anything; on a real kernel what you get depends on which mode it
       uses. */
    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
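
/*
 * For context, guest code reaches the helper above by calling the fixed
 * commpage address 0xffff0f60.  A purely illustrative C-level sketch of
 * a caller, following the reference prototype in kernel_user_helpers.txt
 * (the wrapper name here is made up for illustration):
 *
 *     typedef int (*kernel_cmpxchg64_fn)(const int64_t *oldval,
 *                                        const int64_t *newval,
 *                                        volatile int64_t *ptr);
 *     #define __kernel_cmpxchg64 ((kernel_cmpxchg64_fn)0xffff0f60)
 *
 *     // Returns 0 if *ptr was atomically updated from oldval to newval.
 *     int try_cmpxchg64(volatile int64_t *ptr, int64_t old, int64_t new)
 *     {
 *         return __kernel_cmpxchg64(&old, &new, ptr);
 *     }
 */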

/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
         /* XXX: This only works between threads, not between processes.
            It's probably possible to implement this with native host
            operations. However things like ldrex/strex are much harder so
            there's not much point trying.  */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}
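
/*
 * The fixed addresses handled above live in the kernel's high
 * vector/kuser helper page at 0xffff0000.  In QEMU user mode, guest
 * execution from that region is reported as EXCP_KERNEL_TRAP, which
 * cpu_loop() below routes here rather than executing any real code
 * at those addresses.
 */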

static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
{
    /*
     * Return true if this insn is one of the three magic UDF insns
     * which the kernel treats as breakpoint insns.
     */
    if (!is_thumb) {
        return (opcode & 0x0fffffff) == 0x07f001f0;
    } else {
        /*
         * Note that we get the two halves of the 32-bit T32 insn
         * in the opposite order to the value the kernel uses in
         * its undef_hook struct.
         */
        return ((opcode & 0xffff) == 0xde01) || (opcode == 0xa000f7f0);
    }
}
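
/*
 * Concretely, the three encodings matched above are: the A32 UDF
 * 0xe7f001f0 (with the condition field ignored), the 16-bit Thumb UDF
 * 0xde01, and the 32-bit Thumb UDF whose halfwords are 0xf7f0, 0xa000
 * (seen here as 0xa000f7f0 because of the halfword ordering noted
 * above).
 */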

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;
    uint32_t addr;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                TaskState *ts = cs->opaque;
                uint32_t opcode;
                int rc;

                /*
                 * We handle the FPU emulation here, as the Linux kernel
                 * does: fetch the faulting opcode and hand it to the
                 * FPA emulator.
                 */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                /*
                 * The Linux kernel treats some UDF patterns specially
                 * to use as breakpoints (instead of the architectural
                 * bkpt insn). These should trigger a SIGTRAP rather
                 * than SIGILL.
                 */
                if (insn_is_linux_bkpt(opcode, env->thumb)) {
                    goto excp_debug;
                }

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_errno = 0;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                } else if (rc < 0) { /* FP exception */
                    int arm_fpe = 0;

                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                      arm_fpe |= BIT_IOC;
                    if (-rc & float_flag_divbyzero)
                      arm_fpe |= BIT_DZC;
                    if (-rc & float_flag_overflow)
                      arm_fpe |= BIT_OFC;
                    if (-rc & float_flag_underflow)
                      arm_fpe |= BIT_UFC;
                    if (-rc & float_flag_inexact)
                      arm_fpe |= BIT_IXC;

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

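                    /*
                     * In the FPA FPSR the cumulative exception flags
                     * occupy bits 0..4 and the matching exception enable
                     * bits occupy bits 16..20, so shifting the
                     * accumulated flags left by 16 lines them up with
                     * their enable bits for the test below.
                     */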
                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                      info.si_signo = TARGET_SIGFPE;
                      info.si_errno = 0;

                      /* ordered by priority, least first */
                      if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                      if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                      if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                      if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                      if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                      info._sifields._sigfault._addr = env->regs[15];
                      queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                    } else {
                      env->regs[15] += 4;
                    }

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                      fpsr |= BIT_IXC;
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                      fpsr |= BIT_UFC;
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                      fpsr |= BIT_OFC;
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                      fpsr |= BIT_DZC;
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
                      fpsr |= BIT_IOC;
                    ts->fpa.fpsr = fpsr;
                } else { /* everything OK */
                    /* increment PC */
                    env->regs[15] += 4;
                }
            }
            break;
        case EXCP_SWI:
            {
                env->eabi = 1;
                /* system call */
                if (env->thumb) {
                    /* Thumb is always EABI style with syscall number in r7 */
                    n = env->regs[7];
                } else {
                    /*
                     * Equivalent of kernel CONFIG_OABI_COMPAT: read the
                     * Arm SVC insn to extract the immediate, which is the
                     * syscall number in OABI.
                     */
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4, env);
                    n = insn & 0xffffff;
                    if (n == 0) {
                        /* zero immediate: EABI, syscall number in r7 */
                        n = env->regs[7];
                    } else {
                        /*
                         * This XOR matches the kernel code: an immediate
                         * in the valid range (0x900000 .. 0x9fffff) is
                         * converted into the correct EABI-style syscall
                         * number; invalid immediates end up as values
                         * > 0xfffff and are handled below as out-of-range.
                         */
                        n ^= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
                }

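                /*
                 * Worked example (illustrative): an OABI "swi 0x900004"
                 * (write) XORs down to n == 4 and is dispatched via
                 * do_syscall() below, while a bogus "swi 0x123456" XORs
                 * to 0xa23456, which is neither an ARM-private syscall
                 * nor in the ENOSYS range below, so it takes the SIGILL
                 * path.
                 */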
                if (n > ARM_NR_BASE) {
                    switch (n) {
                    case ARM_NR_cacheflush:
                        /* nop */
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                        break;
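                    /*
                     * ARM_NR_breakpoint is the kernel's breakpoint
                     * syscall (historically planted by ptrace-based
                     * debuggers): deliver a SIGTRAP with the PC wound
                     * back to the SWI insn itself.
                     */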
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        goto excp_debug;
                    case ARM_NR_get_tls:
                        env->regs[0] = cpu_get_tls(env);
                        break;
                    default:
                        if (n < 0xf0800) {
                            /*
                             * Syscalls 0xf0000..0xf07ff (or 0x9f0000..
                             * 0x9f07ff in OABI numbering) are defined
                             * to return -ENOSYS rather than raising
                             * SIGILL. Note that we have already
                             * removed the 0x900000 prefix.
                             */
                            qemu_log_mask(LOG_UNIMP,
                                          "qemu: Unsupported ARM syscall: 0x%x\n",
                                          n);
                            env->regs[0] = -TARGET_ENOSYS;
                        } else {
                            /*
                             * Otherwise SIGILL. This includes any SWI with
                             * immediate not originally 0x9fxxxx, because
                             * of the earlier XOR.
                             */
                            info.si_signo = TARGET_SIGILL;
                            info.si_errno = 0;
                            info.si_code = TARGET_ILL_ILLTRP;
                            info._sifields._sigfault._addr = env->regs[15];
                            if (env->thumb) {
                                info._sifields._sigfault._addr -= 2;
                            } else {
                                info._sifields._sigfault._addr -= 4;
                            }
                            queue_signal(env, info.si_signo,
                                         QEMU_SI_FAULT, &info);
                        }
                        break;
                    }
                } else {
                    ret = do_syscall(env,
                                     n,
                                     env->regs[0],
                                     env->regs[1],
                                     env->regs[2],
                                     env->regs[3],
                                     env->regs[4],
                                     env->regs[5],
                                     0, 0);
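                    /*
                     * -TARGET_ERESTARTSYS means the syscall should be
                     * restarted after signal handling, so wind the PC
                     * back over the SVC insn; -TARGET_QEMU_ESIGRETURN
                     * means sigreturn has already set up the register
                     * state, so r0 must not be overwritten.
                     */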
                    if (ret == -TARGET_ERESTARTSYS) {
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                        env->regs[0] = ret;
                    }
                }
            }
            break;
        case EXCP_SEMIHOST:
            env->regs[0] = do_arm_semihosting(env);
            env->regs[15] += env->thumb ? 2 : 4;
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = addr;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
        excp_debug:
            info.si_signo = TARGET_SIGTRAP;
            info.si_errno = 0;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env)) {
                goto error;
            }
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
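        /*
         * EXCP_ATOMIC: the guest executed something (e.g. a paired
         * exclusive) that could not be handled while other vCPUs run;
         * step it in an exclusive region with all other vCPUs halted.
         */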
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
                      trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct image_info *info = ts->info;
    int i;

    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
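    /*
     * For big-endian guests, choose between BE8 (EABI v4+ binaries
     * marked EF_ARM_BE8: little-endian code with big-endian data,
     * selected via CPSR.E and SCTLR.E0E) and legacy BE32 (SCTLR.B set,
     * where code is big-endian as well).
     */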
#ifdef TARGET_WORDS_BIGENDIAN
    /* Enable BE8.  */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
    arm_rebuild_hflags(env);
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
    ts->heap_limit = 0;
}
498