/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/helper-info.h"
#include "tcg/tcg-ldst.h"
#include "disas/dis-asm.h"
#include "tcg-has.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

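/*
 * The interpreter publishes the current bytecode position here so that
 * helper functions can retrieve the "return address" of the call
 * (see the INDEX_op_call handling below).
 */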
__thread uintptr_t tci_tb_ptr;

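/*
 * Store a 64-bit value into a pair of 32-bit host registers.
 * For example, value 0x1122334455667788 ends up as
 * regs[low_index] = 0x55667788 and regs[high_index] = 0x11223344.
 */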
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once. The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

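/*
 * Each instruction is a single uint32_t word. As decoded by the helpers
 * below, the opcode occupies bits [0,8) and operands are packed upward
 * from bit 8: registers take 4 bits, bit positions 6 bits, conditions
 * 4 bits, MemOpIdx and ldst offsets 16 bits, and label/immediate
 * displacements 20 bits.
 */
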
static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 16, 16);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}

static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
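/*
 * For example, on a 64-bit host "CASE_32_64(add)" expands to
 *     case INDEX_op_add_i64:
 *     case INDEX_op_add_i32:
 * while on a 32-bit host only the _i32 case label is emitted.
 */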

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64, taddr;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

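                /*
                 * Point each argument at its slot(s) in the on-stack
                 * parameter area; types wider than 64 bits span
                 * multiple slots.
                 */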
                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

        /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

        /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

        /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
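        /*
         * deposit32(v, pos, len, f) replaces the len-bit field of v
         * starting at bit pos with the low len bits of f; extract32
         * and sextract32 read such a field back, zero- or
         * sign-extended respectively.
         */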
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
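        /*
         * The encoded brcond only tests a single register against zero;
         * the disassembler below accordingly prints it as
         * "reg, 0, ne, label".
         */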
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#if TCG_TARGET_REG_BITS == 64
        /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
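            /* Carry out of the low half shows up as unsigned wraparound. */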
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
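            /* Borrow from the high half when the low subtraction wraps. */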
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

        /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

        /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = regs[r1];
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = regs[r2];
                oi = regs[r3];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
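            /* On 32-bit hosts the pair is r0 = low half, r1 = high half. */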
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = regs[r1];
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = regs[r2];
                oi = regs[r3];
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
        [TCG_COND_TSTEQ] = "tsteq",
        [TCG_COND_TSTNE] = "tstne",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3));
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        tci_args_rrm(insn, &r0, &r1, &oi);
        info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                           op_name, str_r(r0), str_r(r1), oi);
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}