xref: /openbmc/qemu/tcg/optimize.c (revision 95faaa73)
1 /*
2  * Optimizations for Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2010 Samsung Electronics.
5  * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "config.h"
27 
28 #include <stdlib.h>
29 #include <stdio.h>
30 
31 #include "qemu-common.h"
32 #include "tcg-op.h"
33 
34 #define CASE_OP_32_64(x)                        \
35         glue(glue(case INDEX_op_, x), _i32):    \
36         glue(glue(case INDEX_op_, x), _i64)
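
/*
 * Illustrative note (an editorial addition, not upstream code): CASE_OP_32_64
 * emits the pair of case labels for the 32-bit and 64-bit variants of an op,
 * using the glue() token-pasting macro from the QEMU headers, so that writing
 * "CASE_OP_32_64(add):" in a switch is equivalent to:
 *
 *     case INDEX_op_add_i32:
 *     case INDEX_op_add_i64:
 */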
37 
38 typedef enum {
39     TCG_TEMP_UNDEF = 0,
40     TCG_TEMP_CONST,
41     TCG_TEMP_COPY,
42 } tcg_temp_state;
43 
44 struct tcg_temp_info {
45     tcg_temp_state state;
46     uint16_t prev_copy;
47     uint16_t next_copy;
48     tcg_target_ulong val;
49     tcg_target_ulong mask;
50 };
51 
52 static struct tcg_temp_info temps[TCG_MAX_TEMPS];
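
/*
 * Illustrative note (an editorial addition, not upstream code): temps[] is
 * indexed by temp number.  "mask" records which bits of the temp may still be
 * nonzero; a cleared bit means "known to be zero".  For instance, after an
 * ext8u_i32 the destination's mask can be narrowed to 0xff, so a later
 * and_i32 of that temp with a constant 0xff00 can be folded to a movi of 0.
 */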
53 
54 /* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP had only one other copy,
55    also clear the copy flag from that remaining temp.  */
56 static void reset_temp(TCGArg temp)
57 {
58     if (temps[temp].state == TCG_TEMP_COPY) {
59         if (temps[temp].prev_copy == temps[temp].next_copy) {
60             temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
61         } else {
62             temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
63             temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
64         }
65     }
66     temps[temp].state = TCG_TEMP_UNDEF;
67     temps[temp].mask = -1;
68 }
69 
70 /* Reset all temporaries, given that there are NB_TEMPS of them.  */
71 static void reset_all_temps(int nb_temps)
72 {
73     int i;
74     for (i = 0; i < nb_temps; i++) {
75         temps[i].state = TCG_TEMP_UNDEF;
76         temps[i].mask = -1;
77     }
78 }
79 
80 static int op_bits(TCGOpcode op)
81 {
82     const TCGOpDef *def = &tcg_op_defs[op];
83     return def->flags & TCG_OPF_64BIT ? 64 : 32;
84 }
85 
86 static TCGOpcode op_to_mov(TCGOpcode op)
87 {
88     switch (op_bits(op)) {
89     case 32:
90         return INDEX_op_mov_i32;
91     case 64:
92         return INDEX_op_mov_i64;
93     default:
94         fprintf(stderr, "op_to_mov: unexpected return value of "
95                 "function op_bits.\n");
96         tcg_abort();
97     }
98 }
99 
100 static TCGOpcode op_to_movi(TCGOpcode op)
101 {
102     switch (op_bits(op)) {
103     case 32:
104         return INDEX_op_movi_i32;
105     case 64:
106         return INDEX_op_movi_i64;
107     default:
108         fprintf(stderr, "op_to_movi: unexpected return value of "
109                 "function op_bits.\n");
110         tcg_abort();
111     }
112 }
113 
114 static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
115 {
116     TCGArg i;
117 
118     /* If this is already a global, we can't do better. */
119     if (temp < s->nb_globals) {
120         return temp;
121     }
122 
123     /* Search for a global first. */
124     for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
125         if (i < s->nb_globals) {
126             return i;
127         }
128     }
129 
130     /* If it is a temp, search for a temp local. */
131     if (!s->temps[temp].temp_local) {
132         for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
133             if (s->temps[i].temp_local) {
134                 return i;
135             }
136         }
137     }
138 
139     /* Failing to find a better representation, return the same temp. */
140     return temp;
141 }
142 
143 static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
144 {
145     TCGArg i;
146 
147     if (arg1 == arg2) {
148         return true;
149     }
150 
151     if (temps[arg1].state != TCG_TEMP_COPY
152         || temps[arg2].state != TCG_TEMP_COPY) {
153         return false;
154     }
155 
156     for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) {
157         if (i == arg2) {
158             return true;
159         }
160     }
161 
162     return false;
163 }
164 
165 static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
166                             TCGOpcode old_op, TCGArg dst, TCGArg src)
167 {
168     TCGOpcode new_op = op_to_mov(old_op);
169     tcg_target_ulong mask;
170 
171     s->gen_opc_buf[op_index] = new_op;
172 
173     reset_temp(dst);
174     mask = temps[src].mask;
175     if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
176         /* High bits of the destination are now garbage.  */
177         mask |= ~0xffffffffull;
178     }
179     temps[dst].mask = mask;
180 
181     assert(temps[src].state != TCG_TEMP_CONST);
182 
183     if (s->temps[src].type == s->temps[dst].type) {
184         if (temps[src].state != TCG_TEMP_COPY) {
185             temps[src].state = TCG_TEMP_COPY;
186             temps[src].next_copy = src;
187             temps[src].prev_copy = src;
188         }
189         temps[dst].state = TCG_TEMP_COPY;
190         temps[dst].next_copy = temps[src].next_copy;
191         temps[dst].prev_copy = src;
192         temps[temps[dst].next_copy].prev_copy = dst;
193         temps[src].next_copy = dst;
194     }
195 
196     gen_args[0] = dst;
197     gen_args[1] = src;
198 }
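
/*
 * Illustrative trace (an editorial addition, not upstream code): the splice
 * above inserts DST into the circular copy list immediately after SRC.
 * Starting from a lone t1 and calling tcg_opt_gen_mov() twice with src == t1,
 * first for dst == t5 and then for dst == t6, the next_copy ring becomes:
 *
 *     t1 -> t5 -> t1            after the first call
 *     t1 -> t6 -> t5 -> t1      after the second call
 *
 * with prev_copy always pointing the opposite way around the ring.
 */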
199 
200 static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
201                              TCGOpcode old_op, TCGArg dst, TCGArg val)
202 {
203     TCGOpcode new_op = op_to_movi(old_op);
204     tcg_target_ulong mask;
205 
206     s->gen_opc_buf[op_index] = new_op;
207 
208     reset_temp(dst);
209     temps[dst].state = TCG_TEMP_CONST;
210     temps[dst].val = val;
211     mask = val;
212     if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
213         /* High bits of the destination are now garbage.  */
214         mask |= ~0xffffffffull;
215     }
216     temps[dst].mask = mask;
217 
218     gen_args[0] = dst;
219     gen_args[1] = val;
220 }
221 
222 static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
223 {
224     uint64_t l64, h64;
225 
226     switch (op) {
227     CASE_OP_32_64(add):
228         return x + y;
229 
230     CASE_OP_32_64(sub):
231         return x - y;
232 
233     CASE_OP_32_64(mul):
234         return x * y;
235 
236     CASE_OP_32_64(and):
237         return x & y;
238 
239     CASE_OP_32_64(or):
240         return x | y;
241 
242     CASE_OP_32_64(xor):
243         return x ^ y;
244 
245     case INDEX_op_shl_i32:
246         return (uint32_t)x << (y & 31);
247 
248     case INDEX_op_shl_i64:
249         return (uint64_t)x << (y & 63);
250 
251     case INDEX_op_shr_i32:
252         return (uint32_t)x >> (y & 31);
253 
254     case INDEX_op_trunc_shr_i32:
255     case INDEX_op_shr_i64:
256         return (uint64_t)x >> (y & 63);
257 
258     case INDEX_op_sar_i32:
259         return (int32_t)x >> (y & 31);
260 
261     case INDEX_op_sar_i64:
262         return (int64_t)x >> (y & 63);
263 
264     case INDEX_op_rotr_i32:
265         return ror32(x, y & 31);
266 
267     case INDEX_op_rotr_i64:
268         return ror64(x, y & 63);
269 
270     case INDEX_op_rotl_i32:
271         return rol32(x, y & 31);
272 
273     case INDEX_op_rotl_i64:
274         return rol64(x, y & 63);
275 
276     CASE_OP_32_64(not):
277         return ~x;
278 
279     CASE_OP_32_64(neg):
280         return -x;
281 
282     CASE_OP_32_64(andc):
283         return x & ~y;
284 
285     CASE_OP_32_64(orc):
286         return x | ~y;
287 
288     CASE_OP_32_64(eqv):
289         return ~(x ^ y);
290 
291     CASE_OP_32_64(nand):
292         return ~(x & y);
293 
294     CASE_OP_32_64(nor):
295         return ~(x | y);
296 
297     CASE_OP_32_64(ext8s):
298         return (int8_t)x;
299 
300     CASE_OP_32_64(ext16s):
301         return (int16_t)x;
302 
303     CASE_OP_32_64(ext8u):
304         return (uint8_t)x;
305 
306     CASE_OP_32_64(ext16u):
307         return (uint16_t)x;
308 
309     case INDEX_op_ext32s_i64:
310         return (int32_t)x;
311 
312     case INDEX_op_ext32u_i64:
313         return (uint32_t)x;
314 
315     case INDEX_op_muluh_i32:
316         return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
317     case INDEX_op_mulsh_i32:
318         return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
319 
320     case INDEX_op_muluh_i64:
321         mulu64(&l64, &h64, x, y);
322         return h64;
323     case INDEX_op_mulsh_i64:
324         muls64(&l64, &h64, x, y);
325         return h64;
326 
327     case INDEX_op_div_i32:
328         /* Avoid crashing on divide by zero, otherwise undefined.  */
329         return (int32_t)x / ((int32_t)y ? : 1);
330     case INDEX_op_divu_i32:
331         return (uint32_t)x / ((uint32_t)y ? : 1);
332     case INDEX_op_div_i64:
333         return (int64_t)x / ((int64_t)y ? : 1);
334     case INDEX_op_divu_i64:
335         return (uint64_t)x / ((uint64_t)y ? : 1);
336 
337     case INDEX_op_rem_i32:
338         return (int32_t)x % ((int32_t)y ? : 1);
339     case INDEX_op_remu_i32:
340         return (uint32_t)x % ((uint32_t)y ? : 1);
341     case INDEX_op_rem_i64:
342         return (int64_t)x % ((int64_t)y ? : 1);
343     case INDEX_op_remu_i64:
344         return (uint64_t)x % ((uint64_t)y ? : 1);
345 
346     default:
347         fprintf(stderr,
348                 "Unrecognized operation %d in do_constant_folding.\n", op);
349         tcg_abort();
350     }
351 }
352 
353 static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
354 {
355     TCGArg res = do_constant_folding_2(op, x, y);
356     if (op_bits(op) == 32) {
357         res &= 0xffffffff;
358     }
359     return res;
360 }
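
/*
 * Illustrative example (an editorial addition, not upstream code): the final
 * masking keeps 32-bit results within 32 bits even when TCGArg is 64 bits
 * wide.  On a 64-bit host, for instance:
 *
 *     do_constant_folding(INDEX_op_add_i32, 0xffffffff, 1) == 0
 *
 * whereas the raw 64-bit sum computed by do_constant_folding_2() would have
 * been 0x100000000.
 */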
361 
362 static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
363 {
364     switch (c) {
365     case TCG_COND_EQ:
366         return x == y;
367     case TCG_COND_NE:
368         return x != y;
369     case TCG_COND_LT:
370         return (int32_t)x < (int32_t)y;
371     case TCG_COND_GE:
372         return (int32_t)x >= (int32_t)y;
373     case TCG_COND_LE:
374         return (int32_t)x <= (int32_t)y;
375     case TCG_COND_GT:
376         return (int32_t)x > (int32_t)y;
377     case TCG_COND_LTU:
378         return x < y;
379     case TCG_COND_GEU:
380         return x >= y;
381     case TCG_COND_LEU:
382         return x <= y;
383     case TCG_COND_GTU:
384         return x > y;
385     default:
386         tcg_abort();
387     }
388 }
389 
390 static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
391 {
392     switch (c) {
393     case TCG_COND_EQ:
394         return x == y;
395     case TCG_COND_NE:
396         return x != y;
397     case TCG_COND_LT:
398         return (int64_t)x < (int64_t)y;
399     case TCG_COND_GE:
400         return (int64_t)x >= (int64_t)y;
401     case TCG_COND_LE:
402         return (int64_t)x <= (int64_t)y;
403     case TCG_COND_GT:
404         return (int64_t)x > (int64_t)y;
405     case TCG_COND_LTU:
406         return x < y;
407     case TCG_COND_GEU:
408         return x >= y;
409     case TCG_COND_LEU:
410         return x <= y;
411     case TCG_COND_GTU:
412         return x > y;
413     default:
414         tcg_abort();
415     }
416 }
417 
418 static bool do_constant_folding_cond_eq(TCGCond c)
419 {
420     switch (c) {
421     case TCG_COND_GT:
422     case TCG_COND_LTU:
423     case TCG_COND_LT:
424     case TCG_COND_GTU:
425     case TCG_COND_NE:
426         return 0;
427     case TCG_COND_GE:
428     case TCG_COND_GEU:
429     case TCG_COND_LE:
430     case TCG_COND_LEU:
431     case TCG_COND_EQ:
432         return 1;
433     default:
434         tcg_abort();
435     }
436 }
437 
438 /* Return 2 if the condition can't be simplified, and the result
439    of the condition (0 or 1) if it can */
440 static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
441                                        TCGArg y, TCGCond c)
442 {
443     if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
444         switch (op_bits(op)) {
445         case 32:
446             return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
447         case 64:
448             return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
449         default:
450             tcg_abort();
451         }
452     } else if (temps_are_copies(x, y)) {
453         return do_constant_folding_cond_eq(c);
454     } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
455         switch (c) {
456         case TCG_COND_LTU:
457             return 0;
458         case TCG_COND_GEU:
459             return 1;
460         default:
461             return 2;
462         }
463     } else {
464         return 2;
465     }
466 }
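
/*
 * Illustrative example (an editorial addition, not upstream code): with temps
 * x and y both in the TCG_TEMP_CONST state holding 3 and 5 respectively,
 *
 *     do_constant_folding_cond(INDEX_op_setcond_i32, x, y, TCG_COND_LT) == 1
 *     do_constant_folding_cond(INDEX_op_setcond_i32, x, y, TCG_COND_EQ) == 0
 *
 * while a comparison involving a non-constant, non-copy temp returns 2,
 * except for LTU/GEU against a constant 0, which fold to 0 and 1.
 */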
467 
468 /* Return 2 if the condition can't be simplified, and the result
469    of the condition (0 or 1) if it can */
470 static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
471 {
472     TCGArg al = p1[0], ah = p1[1];
473     TCGArg bl = p2[0], bh = p2[1];
474 
475     if (temps[bl].state == TCG_TEMP_CONST
476         && temps[bh].state == TCG_TEMP_CONST) {
477         uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;
478 
479         if (temps[al].state == TCG_TEMP_CONST
480             && temps[ah].state == TCG_TEMP_CONST) {
481             uint64_t a;
482             a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
483             return do_constant_folding_cond_64(a, b, c);
484         }
485         if (b == 0) {
486             switch (c) {
487             case TCG_COND_LTU:
488                 return 0;
489             case TCG_COND_GEU:
490                 return 1;
491             default:
492                 break;
493             }
494         }
495     }
496     if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
497         return do_constant_folding_cond_eq(c);
498     }
499     return 2;
500 }
501 
502 static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
503 {
504     TCGArg a1 = *p1, a2 = *p2;
505     int sum = 0;
506     sum += temps[a1].state == TCG_TEMP_CONST;
507     sum -= temps[a2].state == TCG_TEMP_CONST;
508 
509     /* Prefer the constant in second argument, and then the form
510        op a, a, b, which is better handled on non-RISC hosts. */
511     if (sum > 0 || (sum == 0 && dest == a2)) {
512         *p1 = a2;
513         *p2 = a1;
514         return true;
515     }
516     return false;
517 }
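
/*
 * Illustrative example (an editorial addition, not upstream code): for a
 * commutative op written as "add_i32 t0, $42, t1" (constant first),
 * swap_commutative() swaps the inputs so the constant ends up second; and for
 * "add_i32 t0, t1, t0" it produces the "op a, a, b" form "add_i32 t0, t0, t1",
 * which the comment above notes is better handled on non-RISC hosts.
 */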
518 
519 static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
520 {
521     int sum = 0;
522     sum += temps[p1[0]].state == TCG_TEMP_CONST;
523     sum += temps[p1[1]].state == TCG_TEMP_CONST;
524     sum -= temps[p2[0]].state == TCG_TEMP_CONST;
525     sum -= temps[p2[1]].state == TCG_TEMP_CONST;
526     if (sum > 0) {
527         TCGArg t;
528         t = p1[0], p1[0] = p2[0], p2[0] = t;
529         t = p1[1], p1[1] = p2[1], p2[1] = t;
530         return true;
531     }
532     return false;
533 }
534 
535 /* Propagate constants and copies, fold constant expressions. */
536 static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
537                                     TCGArg *args, TCGOpDef *tcg_op_defs)
538 {
539     int nb_ops, op_index, nb_temps, nb_globals;
540     TCGArg *gen_args;
541 
542     /* The TEMPS array has an element for each temp.
543        If a temp holds a constant then its value is kept in that element.
544        If a temp is a copy of other ones then the other copies are
545        available through the doubly linked circular list. */
546 
547     nb_temps = s->nb_temps;
548     nb_globals = s->nb_globals;
549     reset_all_temps(nb_temps);
550 
551     nb_ops = tcg_opc_ptr - s->gen_opc_buf;
552     gen_args = args;
553     for (op_index = 0; op_index < nb_ops; op_index++) {
554         TCGOpcode op = s->gen_opc_buf[op_index];
555         const TCGOpDef *def = &tcg_op_defs[op];
556         tcg_target_ulong mask, partmask, affected;
557         int nb_oargs, nb_iargs, nb_args, i;
558         TCGArg tmp;
559 
560         if (op == INDEX_op_call) {
561             *gen_args++ = tmp = *args++;
562             nb_oargs = tmp >> 16;
563             nb_iargs = tmp & 0xffff;
564             nb_args = nb_oargs + nb_iargs + def->nb_cargs;
565         } else {
566             nb_oargs = def->nb_oargs;
567             nb_iargs = def->nb_iargs;
568             nb_args = def->nb_args;
569         }
570 
571         /* Do copy propagation */
572         for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
573             if (temps[args[i]].state == TCG_TEMP_COPY) {
574                 args[i] = find_better_copy(s, args[i]);
575             }
576         }
577 
578         /* For commutative operations, make the constant the second argument */
579         switch (op) {
580         CASE_OP_32_64(add):
581         CASE_OP_32_64(mul):
582         CASE_OP_32_64(and):
583         CASE_OP_32_64(or):
584         CASE_OP_32_64(xor):
585         CASE_OP_32_64(eqv):
586         CASE_OP_32_64(nand):
587         CASE_OP_32_64(nor):
588         CASE_OP_32_64(muluh):
589         CASE_OP_32_64(mulsh):
590             swap_commutative(args[0], &args[1], &args[2]);
591             break;
592         CASE_OP_32_64(brcond):
593             if (swap_commutative(-1, &args[0], &args[1])) {
594                 args[2] = tcg_swap_cond(args[2]);
595             }
596             break;
597         CASE_OP_32_64(setcond):
598             if (swap_commutative(args[0], &args[1], &args[2])) {
599                 args[3] = tcg_swap_cond(args[3]);
600             }
601             break;
602         CASE_OP_32_64(movcond):
603             if (swap_commutative(-1, &args[1], &args[2])) {
604                 args[5] = tcg_swap_cond(args[5]);
605             }
606             /* For movcond, we canonicalize the "false" input reg to match
607                the destination reg so that the tcg backend can implement
608                a "move if true" operation.  */
609             if (swap_commutative(args[0], &args[4], &args[3])) {
610                 args[5] = tcg_invert_cond(args[5]);
611             }
612             break;
613         CASE_OP_32_64(add2):
614             swap_commutative(args[0], &args[2], &args[4]);
615             swap_commutative(args[1], &args[3], &args[5]);
616             break;
617         CASE_OP_32_64(mulu2):
618         CASE_OP_32_64(muls2):
619             swap_commutative(args[0], &args[2], &args[3]);
620             break;
621         case INDEX_op_brcond2_i32:
622             if (swap_commutative2(&args[0], &args[2])) {
623                 args[4] = tcg_swap_cond(args[4]);
624             }
625             break;
626         case INDEX_op_setcond2_i32:
627             if (swap_commutative2(&args[1], &args[3])) {
628                 args[5] = tcg_swap_cond(args[5]);
629             }
630             break;
631         default:
632             break;
633         }
634 
635         /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
636            and "sub r, 0, a => neg r, a" case.  */
637         switch (op) {
638         CASE_OP_32_64(shl):
639         CASE_OP_32_64(shr):
640         CASE_OP_32_64(sar):
641         CASE_OP_32_64(rotl):
642         CASE_OP_32_64(rotr):
643             if (temps[args[1]].state == TCG_TEMP_CONST
644                 && temps[args[1]].val == 0) {
645                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
646                 args += 3;
647                 gen_args += 2;
648                 continue;
649             }
650             break;
651         CASE_OP_32_64(sub):
652             {
653                 TCGOpcode neg_op;
654                 bool have_neg;
655 
656                 if (temps[args[2]].state == TCG_TEMP_CONST) {
657                     /* Proceed with possible constant folding. */
658                     break;
659                 }
660                 if (op == INDEX_op_sub_i32) {
661                     neg_op = INDEX_op_neg_i32;
662                     have_neg = TCG_TARGET_HAS_neg_i32;
663                 } else {
664                     neg_op = INDEX_op_neg_i64;
665                     have_neg = TCG_TARGET_HAS_neg_i64;
666                 }
667                 if (!have_neg) {
668                     break;
669                 }
670                 if (temps[args[1]].state == TCG_TEMP_CONST
671                     && temps[args[1]].val == 0) {
672                     s->gen_opc_buf[op_index] = neg_op;
673                     reset_temp(args[0]);
674                     gen_args[0] = args[0];
675                     gen_args[1] = args[2];
676                     args += 3;
677                     gen_args += 2;
678                     continue;
679                 }
680             }
681             break;
682         CASE_OP_32_64(xor):
683         CASE_OP_32_64(nand):
684             if (temps[args[1]].state != TCG_TEMP_CONST
685                 && temps[args[2]].state == TCG_TEMP_CONST
686                 && temps[args[2]].val == -1) {
687                 i = 1;
688                 goto try_not;
689             }
690             break;
691         CASE_OP_32_64(nor):
692             if (temps[args[1]].state != TCG_TEMP_CONST
693                 && temps[args[2]].state == TCG_TEMP_CONST
694                 && temps[args[2]].val == 0) {
695                 i = 1;
696                 goto try_not;
697             }
698             break;
699         CASE_OP_32_64(andc):
700             if (temps[args[2]].state != TCG_TEMP_CONST
701                 && temps[args[1]].state == TCG_TEMP_CONST
702                 && temps[args[1]].val == -1) {
703                 i = 2;
704                 goto try_not;
705             }
706             break;
707         CASE_OP_32_64(orc):
708         CASE_OP_32_64(eqv):
709             if (temps[args[2]].state != TCG_TEMP_CONST
710                 && temps[args[1]].state == TCG_TEMP_CONST
711                 && temps[args[1]].val == 0) {
712                 i = 2;
713                 goto try_not;
714             }
715             break;
716         try_not:
717             {
718                 TCGOpcode not_op;
719                 bool have_not;
720 
721                 if (def->flags & TCG_OPF_64BIT) {
722                     not_op = INDEX_op_not_i64;
723                     have_not = TCG_TARGET_HAS_not_i64;
724                 } else {
725                     not_op = INDEX_op_not_i32;
726                     have_not = TCG_TARGET_HAS_not_i32;
727                 }
728                 if (!have_not) {
729                     break;
730                 }
731                 s->gen_opc_buf[op_index] = not_op;
732                 reset_temp(args[0]);
733                 gen_args[0] = args[0];
734                 gen_args[1] = args[i];
735                 args += 3;
736                 gen_args += 2;
737                 continue;
738             }
739         default:
740             break;
741         }
742 
743         /* Simplify expressions for "op r, a, const => mov r, a" cases */
744         switch (op) {
745         CASE_OP_32_64(add):
746         CASE_OP_32_64(sub):
747         CASE_OP_32_64(shl):
748         CASE_OP_32_64(shr):
749         CASE_OP_32_64(sar):
750         CASE_OP_32_64(rotl):
751         CASE_OP_32_64(rotr):
752         CASE_OP_32_64(or):
753         CASE_OP_32_64(xor):
754         CASE_OP_32_64(andc):
755             if (temps[args[1]].state != TCG_TEMP_CONST
756                 && temps[args[2]].state == TCG_TEMP_CONST
757                 && temps[args[2]].val == 0) {
758                 goto do_mov3;
759             }
760             break;
761         CASE_OP_32_64(and):
762         CASE_OP_32_64(orc):
763         CASE_OP_32_64(eqv):
764             if (temps[args[1]].state != TCG_TEMP_CONST
765                 && temps[args[2]].state == TCG_TEMP_CONST
766                 && temps[args[2]].val == -1) {
767                 goto do_mov3;
768             }
769             break;
770         do_mov3:
771             if (temps_are_copies(args[0], args[1])) {
772                 s->gen_opc_buf[op_index] = INDEX_op_nop;
773             } else {
774                 tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
775                 gen_args += 2;
776             }
777             args += 3;
778             continue;
779         default:
780             break;
781         }
782 
783         /* Simplify using known-zero bits. Currently only ops with a single
784            output argument are supported. */
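        /*
         * Illustrative example (an editorial addition, not upstream code):
         * for "and_i32 r, a, $0xff" where a is known to have at most its low
         * nibble set (temps[a].mask == 0x0f), the and_const path below gives
         * affected = 0x0f & ~0xff == 0, so the AND cannot change a and the op
         * is replaced further down by a mov (or a nop if r already copies a);
         * the result mask becomes 0x0f & 0xff == 0x0f.
         */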
785         mask = -1;
786         affected = -1;
787         switch (op) {
788         CASE_OP_32_64(ext8s):
789             if ((temps[args[1]].mask & 0x80) != 0) {
790                 break;
791             }
792         CASE_OP_32_64(ext8u):
793             mask = 0xff;
794             goto and_const;
795         CASE_OP_32_64(ext16s):
796             if ((temps[args[1]].mask & 0x8000) != 0) {
797                 break;
798             }
799         CASE_OP_32_64(ext16u):
800             mask = 0xffff;
801             goto and_const;
802         case INDEX_op_ext32s_i64:
803             if ((temps[args[1]].mask & 0x80000000) != 0) {
804                 break;
805             }
806         case INDEX_op_ext32u_i64:
807             mask = 0xffffffffU;
808             goto and_const;
809 
810         CASE_OP_32_64(and):
811             mask = temps[args[2]].mask;
812             if (temps[args[2]].state == TCG_TEMP_CONST) {
813         and_const:
814                 affected = temps[args[1]].mask & ~mask;
815             }
816             mask = temps[args[1]].mask & mask;
817             break;
818 
819         CASE_OP_32_64(andc):
820             /* Known zeros do not imply known ones.  Therefore unless
821                args[2] is constant, we can't infer anything from it.  */
822             if (temps[args[2]].state == TCG_TEMP_CONST) {
823                 mask = ~temps[args[2]].mask;
824                 goto and_const;
825             }
826             /* But we certainly know no bits outside args[1]'s mask may be set. */
827             mask = temps[args[1]].mask;
828             break;
829 
830         case INDEX_op_sar_i32:
831             if (temps[args[2]].state == TCG_TEMP_CONST) {
832                 tmp = temps[args[2]].val & 31;
833                 mask = (int32_t)temps[args[1]].mask >> tmp;
834             }
835             break;
836         case INDEX_op_sar_i64:
837             if (temps[args[2]].state == TCG_TEMP_CONST) {
838                 tmp = temps[args[2]].val & 63;
839                 mask = (int64_t)temps[args[1]].mask >> tmp;
840             }
841             break;
842 
843         case INDEX_op_shr_i32:
844             if (temps[args[2]].state == TCG_TEMP_CONST) {
845                 tmp = temps[args[2]].val & 31;
846                 mask = (uint32_t)temps[args[1]].mask >> tmp;
847             }
848             break;
849         case INDEX_op_shr_i64:
850             if (temps[args[2]].state == TCG_TEMP_CONST) {
851                 tmp = temps[args[2]].val & 63;
852                 mask = (uint64_t)temps[args[1]].mask >> tmp;
853             }
854             break;
855 
856         case INDEX_op_trunc_shr_i32:
857             mask = (uint64_t)temps[args[1]].mask >> args[2];
858             break;
859 
860         CASE_OP_32_64(shl):
861             if (temps[args[2]].state == TCG_TEMP_CONST) {
862                 tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
863                 mask = temps[args[1]].mask << tmp;
864             }
865             break;
866 
867         CASE_OP_32_64(neg):
868             /* Set to 1 all bits at and above the rightmost possibly-set bit.  */
869             mask = -(temps[args[1]].mask & -temps[args[1]].mask);
870             break;
871 
872         CASE_OP_32_64(deposit):
873             mask = deposit64(temps[args[1]].mask, args[3], args[4],
874                              temps[args[2]].mask);
875             break;
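        /*
         * Illustrative example (an editorial addition, not upstream code):
         * args[3] and args[4] are the constant position and length of the
         * deposit, so the possibly-set bits combine the two input masks the
         * same way the values combine.  E.g. deposit64(0xff, 8, 8, 0x0f)
         * == 0x0fff: bits 8..15 come from the low byte of args[2]'s mask,
         * the remaining bits from args[1]'s mask.
         */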
876 
877         CASE_OP_32_64(or):
878         CASE_OP_32_64(xor):
879             mask = temps[args[1]].mask | temps[args[2]].mask;
880             break;
881 
882         CASE_OP_32_64(setcond):
883         case INDEX_op_setcond2_i32:
884             mask = 1;
885             break;
886 
887         CASE_OP_32_64(movcond):
888             mask = temps[args[3]].mask | temps[args[4]].mask;
889             break;
890 
891         CASE_OP_32_64(ld8u):
892             mask = 0xff;
893             break;
894         CASE_OP_32_64(ld16u):
895             mask = 0xffff;
896             break;
897         case INDEX_op_ld32u_i64:
898             mask = 0xffffffffu;
899             break;
900 
901         CASE_OP_32_64(qemu_ld):
902             {
903                 TCGMemOp mop = args[nb_oargs + nb_iargs];
904                 if (!(mop & MO_SIGN)) {
905                     mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
906                 }
907             }
908             break;
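        /*
         * Illustrative example (an editorial addition, not upstream code):
         * for an unsigned load the formula above builds a mask of
         * (8 << MO_SIZE) one bits.  E.g. a 16-bit load (MO_SIZE == 1) gives
         * (2ULL << ((8 << 1) - 1)) - 1 == (2ULL << 15) - 1 == 0xffff, and the
         * "2ULL << (bits - 1)" form avoids an undefined 64-bit shift by 64
         * for 8-byte loads.
         */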
909 
910         default:
911             break;
912         }
913 
914         /* 32-bit ops (non 64-bit ops and non load/store ops) generate
915            32-bit results.  For the "result is zero" test below, we can
916            ignore the high bits, but for further optimizations we need to
917            record that the high bits contain garbage.  */
918         partmask = mask;
919         if (!(def->flags & (TCG_OPF_CALL_CLOBBER | TCG_OPF_64BIT))) {
920             mask |= ~(tcg_target_ulong)0xffffffffu;
921             partmask &= 0xffffffffu;
922             affected &= 0xffffffffu;
923         }
924 
925         if (partmask == 0) {
926             assert(nb_oargs == 1);
927             tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
928             args += nb_args;
929             gen_args += 2;
930             continue;
931         }
932         if (affected == 0) {
933             assert(nb_oargs == 1);
934             if (temps_are_copies(args[0], args[1])) {
935                 s->gen_opc_buf[op_index] = INDEX_op_nop;
936             } else if (temps[args[1]].state != TCG_TEMP_CONST) {
937                 tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
938                 gen_args += 2;
939             } else {
940                 tcg_opt_gen_movi(s, op_index, gen_args, op,
941                                  args[0], temps[args[1]].val);
942                 gen_args += 2;
943             }
944             args += nb_args;
945             continue;
946         }
947 
948         /* Simplify expressions for "op r, a, 0 => movi r, 0" cases */
949         switch (op) {
950         CASE_OP_32_64(and):
951         CASE_OP_32_64(mul):
952         CASE_OP_32_64(muluh):
953         CASE_OP_32_64(mulsh):
954             if ((temps[args[2]].state == TCG_TEMP_CONST
955                 && temps[args[2]].val == 0)) {
956                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
957                 args += 3;
958                 gen_args += 2;
959                 continue;
960             }
961             break;
962         default:
963             break;
964         }
965 
966         /* Simplify expressions for "op r, a, a => mov r, a" cases */
967         switch (op) {
968         CASE_OP_32_64(or):
969         CASE_OP_32_64(and):
970             if (temps_are_copies(args[1], args[2])) {
971                 if (temps_are_copies(args[0], args[1])) {
972                     s->gen_opc_buf[op_index] = INDEX_op_nop;
973                 } else {
974                     tcg_opt_gen_mov(s, op_index, gen_args, op,
975                                     args[0], args[1]);
976                     gen_args += 2;
977                 }
978                 args += 3;
979                 continue;
980             }
981             break;
982         default:
983             break;
984         }
985 
986         /* Simplify expressions for "op r, a, a => movi r, 0" cases */
987         switch (op) {
988         CASE_OP_32_64(andc):
989         CASE_OP_32_64(sub):
990         CASE_OP_32_64(xor):
991             if (temps_are_copies(args[1], args[2])) {
992                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
993                 gen_args += 2;
994                 args += 3;
995                 continue;
996             }
997             break;
998         default:
999             break;
1000         }
1001 
1002         /* Propagate constants through copy operations and do constant
1003            folding.  Constants will be substituted for arguments by the
1004            register allocator where needed and possible.  Also detect copies. */
1005         switch (op) {
1006         CASE_OP_32_64(mov):
1007             if (temps_are_copies(args[0], args[1])) {
1008                 args += 2;
1009                 s->gen_opc_buf[op_index] = INDEX_op_nop;
1010                 break;
1011             }
1012             if (temps[args[1]].state != TCG_TEMP_CONST) {
1013                 tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
1014                 gen_args += 2;
1015                 args += 2;
1016                 break;
1017             }
1018             /* Source argument is constant.  Rewrite the operation and
1019                let the movi case handle it. */
1020             args[1] = temps[args[1]].val;
1021             /* fallthrough */
1022         CASE_OP_32_64(movi):
1023             tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], args[1]);
1024             gen_args += 2;
1025             args += 2;
1026             break;
1027 
1028         CASE_OP_32_64(not):
1029         CASE_OP_32_64(neg):
1030         CASE_OP_32_64(ext8s):
1031         CASE_OP_32_64(ext8u):
1032         CASE_OP_32_64(ext16s):
1033         CASE_OP_32_64(ext16u):
1034         case INDEX_op_ext32s_i64:
1035         case INDEX_op_ext32u_i64:
1036             if (temps[args[1]].state == TCG_TEMP_CONST) {
1037                 tmp = do_constant_folding(op, temps[args[1]].val, 0);
1038                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
1039                 gen_args += 2;
1040                 args += 2;
1041                 break;
1042             }
1043             goto do_default;
1044 
1045         case INDEX_op_trunc_shr_i32:
1046             if (temps[args[1]].state == TCG_TEMP_CONST) {
1047                 tmp = do_constant_folding(op, temps[args[1]].val, args[2]);
1048                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
1049                 gen_args += 2;
1050                 args += 3;
1051                 break;
1052             }
1053             goto do_default;
1054 
1055         CASE_OP_32_64(add):
1056         CASE_OP_32_64(sub):
1057         CASE_OP_32_64(mul):
1058         CASE_OP_32_64(or):
1059         CASE_OP_32_64(and):
1060         CASE_OP_32_64(xor):
1061         CASE_OP_32_64(shl):
1062         CASE_OP_32_64(shr):
1063         CASE_OP_32_64(sar):
1064         CASE_OP_32_64(rotl):
1065         CASE_OP_32_64(rotr):
1066         CASE_OP_32_64(andc):
1067         CASE_OP_32_64(orc):
1068         CASE_OP_32_64(eqv):
1069         CASE_OP_32_64(nand):
1070         CASE_OP_32_64(nor):
1071         CASE_OP_32_64(muluh):
1072         CASE_OP_32_64(mulsh):
1073         CASE_OP_32_64(div):
1074         CASE_OP_32_64(divu):
1075         CASE_OP_32_64(rem):
1076         CASE_OP_32_64(remu):
1077             if (temps[args[1]].state == TCG_TEMP_CONST
1078                 && temps[args[2]].state == TCG_TEMP_CONST) {
1079                 tmp = do_constant_folding(op, temps[args[1]].val,
1080                                           temps[args[2]].val);
1081                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
1082                 gen_args += 2;
1083                 args += 3;
1084                 break;
1085             }
1086             goto do_default;
1087 
1088         CASE_OP_32_64(deposit):
1089             if (temps[args[1]].state == TCG_TEMP_CONST
1090                 && temps[args[2]].state == TCG_TEMP_CONST) {
1091                 tmp = deposit64(temps[args[1]].val, args[3], args[4],
1092                                 temps[args[2]].val);
1093                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
1094                 gen_args += 2;
1095                 args += 5;
1096                 break;
1097             }
1098             goto do_default;
1099 
1100         CASE_OP_32_64(setcond):
1101             tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
1102             if (tmp != 2) {
1103                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
1104                 gen_args += 2;
1105                 args += 4;
1106                 break;
1107             }
1108             goto do_default;
1109 
1110         CASE_OP_32_64(brcond):
1111             tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
1112             if (tmp != 2) {
1113                 if (tmp) {
1114                     reset_all_temps(nb_temps);
1115                     s->gen_opc_buf[op_index] = INDEX_op_br;
1116                     gen_args[0] = args[3];
1117                     gen_args += 1;
1118                 } else {
1119                     s->gen_opc_buf[op_index] = INDEX_op_nop;
1120                 }
1121                 args += 4;
1122                 break;
1123             }
1124             goto do_default;
1125 
1126         CASE_OP_32_64(movcond):
1127             tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
1128             if (tmp != 2) {
1129                 if (temps_are_copies(args[0], args[4-tmp])) {
1130                     s->gen_opc_buf[op_index] = INDEX_op_nop;
1131                 } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
1132                     tcg_opt_gen_movi(s, op_index, gen_args, op,
1133                                      args[0], temps[args[4-tmp]].val);
1134                     gen_args += 2;
1135                 } else {
1136                     tcg_opt_gen_mov(s, op_index, gen_args, op,
1137                                     args[0], args[4-tmp]);
1138                     gen_args += 2;
1139                 }
1140                 args += 6;
1141                 break;
1142             }
1143             goto do_default;
1144 
1145         case INDEX_op_add2_i32:
1146         case INDEX_op_sub2_i32:
1147             if (temps[args[2]].state == TCG_TEMP_CONST
1148                 && temps[args[3]].state == TCG_TEMP_CONST
1149                 && temps[args[4]].state == TCG_TEMP_CONST
1150                 && temps[args[5]].state == TCG_TEMP_CONST) {
1151                 uint32_t al = temps[args[2]].val;
1152                 uint32_t ah = temps[args[3]].val;
1153                 uint32_t bl = temps[args[4]].val;
1154                 uint32_t bh = temps[args[5]].val;
1155                 uint64_t a = ((uint64_t)ah << 32) | al;
1156                 uint64_t b = ((uint64_t)bh << 32) | bl;
1157                 TCGArg rl, rh;
1158 
1159                 if (op == INDEX_op_add2_i32) {
1160                     a += b;
1161                 } else {
1162                     a -= b;
1163                 }
1164 
1165                 /* We emit the extra nop when we emit the add2/sub2.  */
1166                 assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
1167 
1168                 rl = args[0];
1169                 rh = args[1];
1170                 tcg_opt_gen_movi(s, op_index, &gen_args[0],
1171                                  op, rl, (uint32_t)a);
1172                 tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
1173                                  op, rh, (uint32_t)(a >> 32));
1174                 gen_args += 4;
1175                 args += 6;
1176                 break;
1177             }
1178             goto do_default;
1179 
1180         case INDEX_op_mulu2_i32:
1181             if (temps[args[2]].state == TCG_TEMP_CONST
1182                 && temps[args[3]].state == TCG_TEMP_CONST) {
1183                 uint32_t a = temps[args[2]].val;
1184                 uint32_t b = temps[args[3]].val;
1185                 uint64_t r = (uint64_t)a * b;
1186                 TCGArg rl, rh;
1187 
1188                 /* We emit the extra nop when we emit the mulu2.  */
1189                 assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
1190 
1191                 rl = args[0];
1192                 rh = args[1];
1193                 tcg_opt_gen_movi(s, op_index, &gen_args[0],
1194                                  op, rl, (uint32_t)r);
1195                 tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
1196                                  op, rh, (uint32_t)(r >> 32));
1197                 gen_args += 4;
1198                 args += 4;
1199                 break;
1200             }
1201             goto do_default;
1202 
1203         case INDEX_op_brcond2_i32:
1204             tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
1205             if (tmp != 2) {
1206                 if (tmp) {
1207             do_brcond_true:
1208                     reset_all_temps(nb_temps);
1209                     s->gen_opc_buf[op_index] = INDEX_op_br;
1210                     gen_args[0] = args[5];
1211                     gen_args += 1;
1212                 } else {
1213             do_brcond_false:
1214                     s->gen_opc_buf[op_index] = INDEX_op_nop;
1215                 }
1216             } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
1217                        && temps[args[2]].state == TCG_TEMP_CONST
1218                        && temps[args[3]].state == TCG_TEMP_CONST
1219                        && temps[args[2]].val == 0
1220                        && temps[args[3]].val == 0) {
1221                 /* Simplify LT/GE comparisons vs zero to a single compare
1222                    vs the high word of the input.  */
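                /*
                 * Illustrative example (an editorial addition, not upstream
                 * code): "brcond2_i32 al, ah, $0, $0, lt, $label" depends
                 * only on the sign of the 64-bit value, i.e. on its high
                 * word, so it is rewritten below as
                 * "brcond_i32 ah, $0, lt, $label".
                 */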
1223             do_brcond_high:
1224                 reset_all_temps(nb_temps);
1225                 s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
1226                 gen_args[0] = args[1];
1227                 gen_args[1] = args[3];
1228                 gen_args[2] = args[4];
1229                 gen_args[3] = args[5];
1230                 gen_args += 4;
1231             } else if (args[4] == TCG_COND_EQ) {
1232                 /* Simplify EQ comparisons where one of the pairs
1233                    can be simplified.  */
1234                 tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
1235                                                args[0], args[2], TCG_COND_EQ);
1236                 if (tmp == 0) {
1237                     goto do_brcond_false;
1238                 } else if (tmp == 1) {
1239                     goto do_brcond_high;
1240                 }
1241                 tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
1242                                                args[1], args[3], TCG_COND_EQ);
1243                 if (tmp == 0) {
1244                     goto do_brcond_false;
1245                 } else if (tmp != 1) {
1246                     goto do_default;
1247                 }
1248             do_brcond_low:
1249                 reset_all_temps(nb_temps);
1250                 s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
1251                 gen_args[0] = args[0];
1252                 gen_args[1] = args[2];
1253                 gen_args[2] = args[4];
1254                 gen_args[3] = args[5];
1255                 gen_args += 4;
1256             } else if (args[4] == TCG_COND_NE) {
1257                 /* Simplify NE comparisons where one of the pairs
1258                    can be simplified.  */
1259                 tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
1260                                                args[0], args[2], TCG_COND_NE);
1261                 if (tmp == 0) {
1262                     goto do_brcond_high;
1263                 } else if (tmp == 1) {
1264                     goto do_brcond_true;
1265                 }
1266                 tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
1267                                                args[1], args[3], TCG_COND_NE);
1268                 if (tmp == 0) {
1269                     goto do_brcond_low;
1270                 } else if (tmp == 1) {
1271                     goto do_brcond_true;
1272                 }
1273                 goto do_default;
1274             } else {
1275                 goto do_default;
1276             }
1277             args += 6;
1278             break;
1279 
1280         case INDEX_op_setcond2_i32:
1281             tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
1282             if (tmp != 2) {
1283             do_setcond_const:
1284                 tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
1285                 gen_args += 2;
1286             } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
1287                        && temps[args[3]].state == TCG_TEMP_CONST
1288                        && temps[args[4]].state == TCG_TEMP_CONST
1289                        && temps[args[3]].val == 0
1290                        && temps[args[4]].val == 0) {
1291                 /* Simplify LT/GE comparisons vs zero to a single compare
1292                    vs the high word of the input.  */
1293             do_setcond_high:
1294                 s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
1295                 reset_temp(args[0]);
1296                 temps[args[0]].mask = 1;
1297                 gen_args[0] = args[0];
1298                 gen_args[1] = args[2];
1299                 gen_args[2] = args[4];
1300                 gen_args[3] = args[5];
1301                 gen_args += 4;
1302             } else if (args[5] == TCG_COND_EQ) {
1303                 /* Simplify EQ comparisons where one of the pairs
1304                    can be simplified.  */
1305                 tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
1306                                                args[1], args[3], TCG_COND_EQ);
1307                 if (tmp == 0) {
1308                     goto do_setcond_const;
1309                 } else if (tmp == 1) {
1310                     goto do_setcond_high;
1311                 }
1312                 tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
1313                                                args[2], args[4], TCG_COND_EQ);
1314                 if (tmp == 0) {
1315                     goto do_setcond_high;
1316                 } else if (tmp != 1) {
1317                     goto do_default;
1318                 }
1319             do_setcond_low:
1320                 reset_temp(args[0]);
1321                 temps[args[0]].mask = 1;
1322                 s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
1323                 gen_args[0] = args[0];
1324                 gen_args[1] = args[1];
1325                 gen_args[2] = args[3];
1326                 gen_args[3] = args[5];
1327                 gen_args += 4;
1328             } else if (args[5] == TCG_COND_NE) {
1329                 /* Simplify NE comparisons where one of the pairs
1330                    can be simplified.  */
1331                 tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
1332                                                args[1], args[3], TCG_COND_NE);
1333                 if (tmp == 0) {
1334                     goto do_setcond_high;
1335                 } else if (tmp == 1) {
1336                     goto do_setcond_const;
1337                 }
1338                 tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
1339                                                args[2], args[4], TCG_COND_NE);
1340                 if (tmp == 0) {
1341                     goto do_setcond_low;
1342                 } else if (tmp == 1) {
1343                     goto do_setcond_const;
1344                 }
1345                 goto do_default;
1346             } else {
1347                 goto do_default;
1348             }
1349             args += 6;
1350             break;
1351 
1352         case INDEX_op_call:
1353             if (!(args[nb_oargs + nb_iargs + 1]
1354                   & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1355                 for (i = 0; i < nb_globals; i++) {
1356                     reset_temp(i);
1357                 }
1358             }
1359             goto do_reset_output;
1360 
1361         default:
1362         do_default:
1363             /* Default case: we know nothing about the operation (or were
1364                unable to compute its result), so no propagation is done.
1365                We trash everything if the operation is the end of a basic
1366                block, otherwise we only trash the output args.  "mask" is
1367                the non-zero bits mask for the first output arg.  */
1368             if (def->flags & TCG_OPF_BB_END) {
1369                 reset_all_temps(nb_temps);
1370             } else {
1371         do_reset_output:
1372                 for (i = 0; i < nb_oargs; i++) {
1373                     reset_temp(args[i]);
1374                     /* Save the corresponding known-zero bits mask for the
1375                        first output argument (only one supported so far). */
1376                     if (i == 0) {
1377                         temps[args[i]].mask = mask;
1378                     }
1379                 }
1380             }
1381             for (i = 0; i < nb_args; i++) {
1382                 gen_args[i] = args[i];
1383             }
1384             args += nb_args;
1385             gen_args += nb_args;
1386             break;
1387         }
1388     }
1389 
1390     return gen_args;
1391 }
1392 
1393 TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
1394         TCGArg *args, TCGOpDef *tcg_op_defs)
1395 {
1396     TCGArg *res;
1397     res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
1398     return res;
1399 }
1400