/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"


static void check_max_alignment(unsigned a_bits)
{
    /*
     * The requested alignment cannot overlap the TLB flags.
     * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
     */
    if (tcg_use_softmmu) {
        tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
    }
}
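/*
 * Note: the "5" above is assumed to mirror the number of low TLB flag
 * bits reserved by "exec/cpu-all.h" at this revision; per the FIXME it
 * must be updated in lock step with that header.
 */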

static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    unsigned a_bits = get_alignment_bits(op);

    check_max_alignment(a_bits);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        op &= ~MO_ATOM_MASK;
        op |= MO_ATOM_NONE;
    }

    return op;
}
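/*
 * Illustrative example of the canonicalization above: MO_LEUW | MO_ALIGN_2
 * (a 16-bit little-endian access with 2-byte alignment) becomes
 * MO_LEUW | MO_ALIGN, since the requested alignment equals the access size.
 * Sign bits are likewise dropped for stores and for 32-bit results.
 */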

static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
        if (vh) {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
        } else {
            tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
        }
    } else {
        /* See TCGV_LOW/HIGH. */
        TCGTemp *al = addr + HOST_BIG_ENDIAN;
        TCGTemp *ah = addr + !HOST_BIG_ENDIAN;

        if (vh) {
            tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
                        temp_arg(al), temp_arg(ah), oi);
        } else {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
        }
    }
}
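/*
 * On a 32-bit host with a 64-bit guest address, the address temporary is
 * really a pair of adjacent TCGTemps (see TCGV_LOW/TCGV_HIGH), so the
 * opcode above receives the low and high halves as separate arguments.
 */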

static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
    }
}

static void tcg_gen_req_mo(TCGBar type)
{
    type &= tcg_ctx->guest_mo;
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}
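/*
 * Only barriers that the guest requires and the host does not already
 * provide are emitted.  For example, an x86 host defines
 * TCG_TARGET_DEFAULT_MO as TCG_MO_ALL & ~TCG_MO_ST_LD, so for a
 * sequentially consistent guest only store-load barriers are generated.
 */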

/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load.  */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}

#ifdef CONFIG_PLUGIN
static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            tcg_gen_plugin_mem_cb(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                tcg_gen_plugin_mem_cb(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                tcg_gen_plugin_mem_cb(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
}
#endif

static void
plugin_gen_mem_callbacks_i32(TCGv_i32 val,
                             TCGv_i64 copy_addr, TCGTemp *orig_addr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i32(val, tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState) + (HOST_BIG_ENDIAN * 4));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void
plugin_gen_mem_callbacks_i64(TCGv_i64 val,
                             TCGv_i64 copy_addr, TCGTemp *orig_addr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i64(val, tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void
plugin_gen_mem_callbacks_i128(TCGv_i128 val,
                              TCGv_i64 copy_addr, TCGTemp *orig_addr,
                              MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i64(TCGV128_LOW(val), tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState));
        tcg_gen_st_i64(TCGV128_HIGH(val), tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_high) -
                       sizeof(CPUState));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i32;
    } else {
        opc = INDEX_op_qemu_ld_a64_i32;
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
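/*
 * Front ends normally reach the routine above through the type-checked
 * wrapper in "tcg/tcg-op.h"; an illustrative call, where ctx->mem_idx is
 * a hypothetical translator field holding the current MMU index:
 *
 *     tcg_gen_qemu_ld_i32(val, addr, ctx->mem_idx, MO_TEUL | MO_ALIGN);
 */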

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st8_a32_i32;
        } else {
            opc = INDEX_op_qemu_st8_a64_i32;
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i32;
        } else {
            opc = INDEX_op_qemu_st_a64_i32;
        }
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}
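/*
 * TCG_TARGET_HAS_qemu_st8_i32 above is presumably provided for hosts such
 * as i386, where an 8-bit store needs a byte-addressable register and thus
 * a dedicated operand constraint.
 */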

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i64;
    } else {
        opc = INDEX_op_qemu_ld_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_st_a32_i64;
    } else {
        opc = INDEX_op_qemu_st_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
    /* Two softmmu TLB lookups are larger than one function call. */
    if (tcg_use_softmmu) {
        return false;
    }

    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        return false;
    default:
        g_assert_not_reached();
    }
}
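/*
 * For example, MO_ATOM_IFALIGN_PAIR only requires each 8-byte half to be
 * atomic when aligned, which a pair of MO_64 accesses provides, while
 * MO_ATOM_WITHIN16 may require the full 16 bytes to be single-copy atomic
 * and so must go through the helper.
 */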

static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use a byte ordering implemented by the host. */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}
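/*
 * Worked example: orig = MO_128 | MO_BE | MO_ALIGN splits into
 * mop[0] = MO_64 | MO_BE | MO_ALIGN_16, which carries the 16-byte
 * alignment check, and mop[1] = MO_64 | MO_BE | MO_ALIGN for the second
 * half (assuming the host implements the requested byte order).
 */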

static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}

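/*
 * The 128-bit accesses below choose among three strategies: a native
 * i128 load/store opcode when the backend provides one, a pair of i64
 * accesses when the atomicity constraints permit, and otherwise an
 * out-of-line helper call.
 */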
static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i128;
        } else {
            opc = INDEX_op_qemu_ld_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i64;
        } else {
            opc = INDEX_op_qemu_ld_a64_i64;
        }

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, tcg_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i128;
        } else {
            opc = INDEX_op_qemu_st_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i64;
        } else {
            opc = INDEX_op_qemu_st_a64_i64;
        }

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
            gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(tcg_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    case MO_UL:
    case MO_SL:
        tcg_gen_mov_i32(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    case MO_UQ:
    case MO_SQ:
        tcg_gen_mov_i64(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}
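/*
 * Usage example (illustrative): tcg_gen_ext_i64(ret, val, MO_SW) produces
 * the 16-bit sign extension of val, MO_UW the zero extension, while
 * MO_UQ and MO_SQ reduce to a plain move.
 */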

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#if HAVE_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif

static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};

static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen_helper_nonatomic_cmpxchgo(retv, tcg_env, a64, cmpv, newv,
                                      tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);
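        /*
         * t0 is now zero iff both 64-bit halves of oldv and cmpv match,
         * so the movcond pair below selects newv vs. oldv with a single
         * comparison against zero.
         */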

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(tcg_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);
        /*
         * Produce a result, so that we have a well-formed opcode stream
         * with respect to uses of the result in the (dead) code following.
         */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}
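/*
 * Each GEN_ATOMIC_HELPER(NAME, OP, NEW) expansion below defines
 * tcg_gen_atomic_NAME_i32_chk and tcg_gen_atomic_NAME_i64_chk.  NEW
 * selects whether the returned value is the prior memory contents
 * (fetch_*) or the newly computed result (*_fetch).
 */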

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

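/*
 * xchg is expressed via tcg_gen_mov2_*, which ignores the loaded value
 * when computing the stored one; with NEW == 0 the prior memory contents
 * are returned, giving exchange semantics.
 */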
GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER