xref: /openbmc/qemu/accel/tcg/user-exec.c (revision e409c905)
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the MMU access type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
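
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * roughly how the per-host SIGSEGV handler ties adjust_signal_pc() and
 * handle_sigsegv_accerr_write() together.  The real entry point lives in
 * linux-user's signal code; the glue below (ucontext handling, h2g
 * conversion, si_code checks) is a simplified assumption, kept out of
 * the build.
 */
#if 0
static void example_host_sigsegv(CPUState *cpu, siginfo_t *info,
                                 ucontext_t *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    abi_ptr guest_addr = h2g_nocheck(info->si_addr);

    /* A SEGV_ACCERR write may just be our own translation protection. */
    if (access_type == MMU_DATA_STORE
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return; /* the faulting store is retried */
    }

    /* Otherwise deliver SIGSEGV to the guest with a precise guest pc. */
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type,
                          info->si_code == SEGV_MAPERR, pc);
}
#endif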

typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_long last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
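
/*
 * Usage example (editor's addition, not part of the original file):
 * walk_memory_regions() drives a callback over every tracked interval,
 * as dump_region does above.  A minimal sketch that tallies executable
 * bytes; count_exec_region and example_count_exec are hypothetical names.
 */
static int count_exec_region(void *priv, target_ulong start,
                             target_ulong end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(target_ulong *)priv += end - start;
    }
    return 0; /* a nonzero return would abort the walk */
}

static target_ulong __attribute__((unused)) example_count_exec(void)
{
    target_ulong total = 0;

    walk_memory_regions(&total, count_exec_region);
    return total;
}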

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* A subroutine of page_set_flags: set and clear flags in [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
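
/*
 * Worked example (editor's addition, not part of the original file):
 * suppose an anonymous mapping [0x1000, 0x4fff] is RW (one node, with
 * PAGE_ANON sticky), and an mprotect makes [0x2000, 0x2fff] read-only.
 * Because the sticky bits survive, set_flags != merge_flags and
 * pageflags_set_clear splits the node three ways:
 *
 *   before:  [0x1000 ................ 0x4fff] RW
 *   after:   [0x1000, 0x1fff] RW
 *            [0x2000, 0x2fff] R      (merge_flags)
 *            [0x3000, 0x4fff] RW     (tail recreated with p_flags)
 *
 * If instead the new flags exactly match a neighbouring node,
 * pageflags_create_merge re-coalesces the ranges into a single node.
 */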

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}
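
/*
 * Usage sketch (editor's addition, not part of the original file): this
 * is roughly what the mmap/mprotect emulation paths do.  PAGE_RESET marks
 * a brand-new mapping so sticky bits (PAGE_ANON etc.) are cleared rather
 * than inherited; a later mprotect-style update omits PAGE_RESET.  The
 * function name is hypothetical.
 */
static void __attribute__((unused))
example_new_anon_mapping(target_ulong start, target_ulong len)
{
    mmap_lock();
    /* Fresh anonymous RW mapping: PAGE_ANON requires PAGE_RESET. */
    page_set_flags(start, start + len - 1,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE
                   | PAGE_ANON | PAGE_RESET);
    /* Later, drop write permission in place; sticky bits are kept. */
    page_set_flags(start, start + len - 1, PAGE_VALID | PAGE_READ);
    mmap_unlock();
}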

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1; /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0; /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
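
/*
 * Usage sketch (editor's addition, not part of the original file):
 * syscall emulation validates guest buffers with page_check_range()
 * before touching them, e.g. a write(2) source buffer must be readable.
 * The wrapper name is hypothetical.
 */
static bool __attribute__((unused))
example_buffer_readable(target_ulong buf, target_ulong len)
{
    return page_check_range(buf, len, PAGE_READ) == 0;
}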

void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}

static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
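
/*
 * Usage sketch (editor's addition, not part of the original file): the
 * nonfault form lets a helper test for access without raising SIGSEGV,
 * as e.g. first-fault vector loads do.  Here a TLB_INVALID_MASK result
 * means "would fault"; the mmu_idx argument is unused in user-only mode,
 * so 0 is passed.  The wrapper name is hypothetical.
 */
static bool __attribute__((unused))
example_probe_nonfault(CPUArchState *env, vaddr addr, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD,
                                   0, true, &host, ra);

    return flags == 0; /* on success, host points at the guest byte */
}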

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one chunk per page, we have overhead of 40/128, or
 * about 31%.  Therefore, allocate memory for 64 pages at a time for
 * overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */
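
/*
 * Worked example (editor's addition, not part of the original file):
 * with 4 KiB target pages (TARGET_PAGE_BITS == 12), TBD_MASK covers a
 * 64-page, 256 KiB region.  For address 0x12345678:
 *
 *   page   = addr & TARGET_PAGE_MASK = 0x12345000
 *   region = addr & TBD_MASK         = 0x12340000
 *   index  = (page - region) >> 12   = 5
 *
 * so the per-page data lives at t->data[5] within one TargetPageDataNode.
 */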

/* The softmmu versions of these helpers are in cputlb.c.  */

static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
                            MemOp mop, uintptr_t ra, MMUAccessType type)
{
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

#include "ldst_atomicity.c.inc"

static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld1_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_2(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld2_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_4(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld4_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_8(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return do_ld8_mmu(env, addr, get_memop(oi), ra);
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_16(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       MemOpIdx oi, uintptr_t ra)
{
    return do_ld16_mmu(env, addr, get_memop(oi), ra);
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
}

void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap16(val);
    }
    store_atom_2(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    store_atom_4(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    store_atom_8(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                        MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap128(val);
    }
    store_atom_16(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}

void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
                  Int128 val, MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/*
 * Code access helpers: helper_retaddr is set to 1 so that, on a fault,
 * adjust_signal_pc() treats the access as an instruction fetch
 * (see case 1 above) rather than a data access.
 */
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 */
static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
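
/*
 * Usage sketch (editor's addition, not part of the original file): the
 * templates above expand to functions such as cpu_atomic_cmpxchgl_le_mmu
 * (4-byte little-endian compare-and-swap).  A caller packs the MemOp and
 * an mmu_idx into a MemOpIdx; the mmu_idx is ignored in user-only builds,
 * and alignment is enforced by atomic_mmu_lookup.  The wrapper name is
 * hypothetical.
 */
static uint32_t __attribute__((unused))
example_cas32(CPUArchState *env, abi_ptr addr,
              uint32_t expect, uint32_t desired, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, 0);

    /* Returns the value previously at addr; equal to expect on success. */
    return cpu_atomic_cmpxchgl_le_mmu(env, addr, expect, desired, oi, ra);
}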