/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/helper-info.h"
#include "tcg/tcg-ldst.h"
#include <ffi.h>

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;
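
/* Store a 64-bit value into the (high_index, low_index) register pair. */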
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64-bit value from two 32-bit values. */
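/* E.g. tci_uint64(0x00000001, 0x00000002) == 0x0000000100000002. */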
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
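
/*
 * For example, an "rrs" instruction word (see tci_args_rrs below) packs
 * the 8-bit opcode into bits [0,8), two 4-bit register indices into bits
 * [8,12) and [12,16), and a signed 16-bit load/store offset into the
 * remaining bits [16,32).
 */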

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 16, 16);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
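    /*
     * The bytecode address is passed to the memory helpers as the
     * "return address", so that a fault can be unwound back to the
     * current instruction.
     */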
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}

static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
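    /*
     * TCI's virtual register file, plus a private stack used for
     * helper-call arguments and the static frame.
     */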
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64, taddr;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;
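
        /* Fetch one insn word; the low 8 bits hold the opcode. */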
        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];
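
                /*
                 * "ptr" addresses a two-word descriptor: the helper's
                 * function pointer followed by its ffi_cif.  The argument
                 * values are already in the stack area; the loop below
                 * rebuilds libffi's per-argument pointers, with each
                 * argument rounded up to whole 8-byte units.
                 */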
                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
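            /* (T1 < regs[r2]) computes the carry out of the low-word add. */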
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_ld_i32;
        case INDEX_op_qemu_ld_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
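                /*
                 * 32-bit host: the 64-bit guest address arrives in a
                 * register pair, and the MemOpIdx in a third register.
                 */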
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = tci_uint64(regs[r2], regs[r1]);
                oi = regs[r3];
            }
        do_ld_i32:
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;

        case INDEX_op_qemu_ld_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = (uint32_t)regs[r2];
                oi = regs[r3];
            }
            goto do_ld_i64;
        case INDEX_op_qemu_ld_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_ld_i64:
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_st_i32;
        case INDEX_op_qemu_st_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = tci_uint64(regs[r2], regs[r1]);
                oi = regs[r3];
            }
        do_st_i32:
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = (uint32_t)regs[r2];
                oi = regs[r3];
            }
            goto do_st_i64;
        case INDEX_op_qemu_st_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_st_i64:
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
        [TCG_COND_TSTEQ] = "tsteq",
        [TCG_COND_TSTNE] = "tstne",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;
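
    /*
     * For the qemu_ld/st opcodes, "len" counts register operands: one
     * or two for the data value plus one or two for the guest address,
     * depending on TCG_TARGET_REG_BITS.
     */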
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_st_a32_i32:
        len = 1 + 1;
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a64_i32:
        len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a64_i64:
        len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    do_qemu_ldst:
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3));
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }
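
    /* Every TCI instruction occupies exactly one 32-bit word. */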
    return sizeof(insn);
}