xref: /openbmc/qemu/accel/tcg/user-exec.c (revision 09e94676ade52708cbece8fd4bd255a25b6ee475)
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "internal.h"

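/*
 * helper_retaddr marks, per host thread, where a guest memory access that
 * may fault is in progress (see adjust_signal_pc below):
 *   0     - within generated code
 *   1     - within a host read for translation, i.e. a code fetch
 *   other - host return address of the helper performing the access
 */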
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the MMUAccessType
 * of the access that faulted.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwriteable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writeable this means we had two threads racing and
 * another thread got there first and already made the page writeable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

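/*
 * Illustrative sketch only: the actual caller is the host SIGSEGV handler
 * in the per-host user-mode signal code, not this file, and the names
 * below are placeholders rather than a real API.
 *
 *     uintptr_t pc = pc taken from the signal ucontext;
 *     MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
 *
 *     if (is_write && si_code == SEGV_ACCERR &&
 *         handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
 *         return;    back to the guest, which retries the faulting access
 *     }
 *     otherwise deliver the SIGSEGV to the guest, using pc and access_type;
 */

/*
 * Check whether a guest access of @access_type to the page at @addr can
 * succeed.  Return 0 if it can.  Otherwise return TLB_INVALID_MASK when
 * @nonfault is set, or report the fault through the target's tlb_fill
 * hook (which does not return for user-only emulation).
 */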
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int flags;

    switch (access_type) {
    case MMU_DATA_STORE:
        flags = PAGE_WRITE;
        break;
    case MMU_DATA_LOAD:
        flags = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        flags = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (!guest_addr_valid_untagged(addr) ||
        page_check_range(addr, 1, flags) < 0) {
        if (nonfault) {
            return TLB_INVALID_MASK;
        } else {
            CPUState *cpu = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
                                  MMU_USER_IDX, false, ra);
            g_assert_not_reached();
        }
    }
    return 0;
}

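/*
 * Like probe_access_internal, but also return the host address via @phost:
 * NULL when the probe reports TLB_INVALID_MASK, otherwise the host pointer
 * corresponding to @addr.  @mmu_idx is unused for user-only emulation.
 */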
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

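/*
 * Probe a range that must not cross a page boundary; any failure raises
 * the fault directly (never nonfault).  Returns the host address for the
 * guest range, or NULL for a zero-length probe.
 */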
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

/* The softmmu versions of these helpers are in cputlb.c.  */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

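/*
 * Translate a guest virtual address to its host address and arm
 * helper_retaddr so that a host fault during the access unwinds to the
 * correct guest pc.  Callers pair this with clear_helper_retaddr().
 */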
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    void *ret;

    /* TODO: Enforce guest required alignment.  */

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

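/*
 * Load/store primitives.  Each one follows the same pattern: check the
 * MemOp, emit the tracepoint, translate the guest address, perform the
 * host access with helper_retaddr armed so that a host fault unwinds to
 * the correct guest pc, then notify any memory plugins.
 */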
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

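/*
 * Code access helpers.  A fault here is a fault during translation, so
 * helper_retaddr is set to the sentinel value 1 (see adjust_signal_pc),
 * which reports it as MMU_INST_FETCH without running the unwinder.
 */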
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

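/*
 * ldst_common.c.inc provides the remaining cpu_ld* and cpu_st* wrappers
 * (shared with the softmmu build), built on the *_mmu primitives above.
 */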
#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    void *ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

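/*
 * Each inclusion of atomic_template.h below instantiates the
 * cpu_atomic_*_mmu helpers (cmpxchg, xchg, fetch-and-op variants) for one
 * operand size, using the ATOMIC_NAME and ATOMIC_MMU_* definitions above.
 */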
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
513