xref: /openbmc/qemu/accel/tcg/user-exec.c (revision cac720ec)
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the MMUAccessType
 * of the faulting access.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwriteable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writeable this means we had two threads racing and
 * another thread got there first and already made the page writeable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
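
/*
 * A minimal sketch of how a host SIGSEGV handler could combine the two
 * functions above, assuming the caller has already extracted host_pc and
 * is_write from the signal frame (both are host specific).  The function
 * name is hypothetical.
 */
static void example_handle_sigsegv(CPUState *cpu, siginfo_t *info,
                                   sigset_t *old_set, uintptr_t host_pc,
                                   bool is_write)
{
    abi_ptr guest_addr = h2g_nocheck(info->si_addr);
    MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);

    if (access_type == MMU_DATA_STORE &&
        info->si_code == SEGV_ACCERR &&
        handle_sigsegv_accerr_write(cpu, old_set, host_pc, guest_addr)) {
        return;    /* page was unprotected; retry the store */
    }

    /* Not handled: deliver SIGSEGV to the guest. */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type,
                          info->si_code == SEGV_MAPERR, host_pc);
}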

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
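
/*
 * A minimal sketch of the non-faulting probe pattern above: with
 * nonfault set, a failed lookup returns TLB_INVALID_MASK instead of
 * raising SIGSEGV.  The function name is hypothetical.
 */
static bool example_probe_read(CPUArchState *env, target_ulong addr,
                               void **host, uintptr_t ra)
{
    int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, MMU_USER_IDX,
                                   true, host, ra);
    return !(flags & TLB_INVALID_MASK);    /* true if *host is usable */
}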

/* The softmmu versions of these helpers are in cputlb.c.  */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
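
/*
 * For example (a sketch, only meaningful with CONFIG_DEBUG_TCG): only the
 * MO_SIZE and MO_BSWAP bits are compared, so alignment and mmu-index bits
 * in the MemOpIdx do not affect the check.
 */
static void example_validate_memop(void)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, MMU_USER_IDX);
    validate_memop(oi, MO_LEUL);    /* passes: MO_ALIGN is not compared */
}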

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    void *ret;

    /* TODO: Enforce guest required alignment.  */

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
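
/*
 * A minimal sketch of how a target helper would call one of the loads
 * above: build the MemOpIdx once and pass GETPC() so that a host fault
 * unwinds to the correct guest pc.  The helper name is hypothetical.
 */
static uint64_t example_helper_ldq(CPUArchState *env, target_ulong addr)
{
    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
    return cpu_ldq_le_mmu(env, addr, oi, GETPC());
}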

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
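
/*
 * The store side follows the same pattern; a minimal sketch with a
 * hypothetical helper name, storing one little-endian word.
 */
static void example_helper_stw(CPUArchState *env, target_ulong addr,
                               uint16_t val)
{
    MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
    cpu_stw_le_mmu(env, addr, val, oi, GETPC());
}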

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
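
/*
 * A minimal sketch of a code fetch during translation.  Because
 * helper_retaddr is set to 1 around the host read, a fault here is
 * reported by adjust_signal_pc() as MMU_INST_FETCH rather than as a
 * data access.  The function name is hypothetical.
 */
static uint32_t example_fetch_insn(CPUArchState *env, abi_ptr pc)
{
    return cpu_ldl_code(env, pc);    /* 4-byte opcode at guest pc */
}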

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    void *ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
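
/*
 * A minimal sketch of calling one of the entry points generated by the
 * template expansions above: ATOMIC_NAME(cmpxchg) with DATA_SIZE 4 and
 * little-endian byte order expands to cpu_atomic_cmpxchgl_le_mmu().
 * The helper name below is hypothetical; an unaligned addr exits to the
 * cpu loop via atomic_mmu_lookup() instead of touching host memory.
 */
static uint32_t example_helper_cmpxchgl(CPUArchState *env, target_ulong addr,
                                        uint32_t cmpv, uint32_t newv)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
    return cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, GETPC());
}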