/*
 * MIPS16 extension (Code Compaction) ASE translation routines
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 * Copyright (c) 2006 Marius Groeger (FPU operations)
 * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
 * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

/* MIPS16 major opcodes */
enum {
    M16_OPC_ADDIUSP = 0x00,
    M16_OPC_ADDIUPC = 0x01,
    M16_OPC_B = 0x02,
    M16_OPC_JAL = 0x03,
    M16_OPC_BEQZ = 0x04,
    M16_OPC_BNEQZ = 0x05,
    M16_OPC_SHIFT = 0x06,
    M16_OPC_LD = 0x07,
    M16_OPC_RRIA = 0x08,
    M16_OPC_ADDIU8 = 0x09,
    M16_OPC_SLTI = 0x0a,
    M16_OPC_SLTIU = 0x0b,
    M16_OPC_I8 = 0x0c,
    M16_OPC_LI = 0x0d,
    M16_OPC_CMPI = 0x0e,
    M16_OPC_SD = 0x0f,
    M16_OPC_LB = 0x10,
    M16_OPC_LH = 0x11,
    M16_OPC_LWSP = 0x12,
    M16_OPC_LW = 0x13,
    M16_OPC_LBU = 0x14,
    M16_OPC_LHU = 0x15,
    M16_OPC_LWPC = 0x16,
    M16_OPC_LWU = 0x17,
    M16_OPC_SB = 0x18,
    M16_OPC_SH = 0x19,
    M16_OPC_SWSP = 0x1a,
    M16_OPC_SW = 0x1b,
    M16_OPC_RRR = 0x1c,
    M16_OPC_RR = 0x1d,
    M16_OPC_EXTEND = 0x1e,
    M16_OPC_I64 = 0x1f
};

/* I8 funct field */
enum {
    I8_BTEQZ = 0x0,
    I8_BTNEZ = 0x1,
    I8_SWRASP = 0x2,
    I8_ADJSP = 0x3,
    I8_SVRS = 0x4,
    I8_MOV32R = 0x5,
    I8_MOVR32 = 0x7
};

/* RRR f field */
enum {
    RRR_DADDU = 0x0,
    RRR_ADDU = 0x1,
    RRR_DSUBU = 0x2,
    RRR_SUBU = 0x3
};

/* RR funct field */
enum {
    RR_JR = 0x00,
    RR_SDBBP = 0x01,
    RR_SLT = 0x02,
    RR_SLTU = 0x03,
    RR_SLLV = 0x04,
    RR_BREAK = 0x05,
    RR_SRLV = 0x06,
    RR_SRAV = 0x07,
    RR_DSRL = 0x08,
    RR_CMP = 0x0a,
    RR_NEG = 0x0b,
    RR_AND = 0x0c,
    RR_OR = 0x0d,
    RR_XOR = 0x0e,
    RR_NOT = 0x0f,
    RR_MFHI = 0x10,
    RR_CNVT = 0x11,
    RR_MFLO = 0x12,
    RR_DSRA = 0x13,
    RR_DSLLV = 0x14,
    RR_DSRLV = 0x16,
    RR_DSRAV = 0x17,
    RR_MULT = 0x18,
    RR_MULTU = 0x19,
    RR_DIV = 0x1a,
    RR_DIVU = 0x1b,
    RR_DMULT = 0x1c,
    RR_DMULTU = 0x1d,
    RR_DDIV = 0x1e,
    RR_DDIVU = 0x1f
};

/* I64 funct field */
enum {
    I64_LDSP = 0x0,
    I64_SDSP = 0x1,
    I64_SDRASP = 0x2,
    I64_DADJSP = 0x3,
    I64_LDPC = 0x4,
    I64_DADDIU5 = 0x5,
    I64_DADDIUPC = 0x6,
    I64_DADDIUSP = 0x7
};

/* RR ry field for CNVT */
enum {
    RR_RY_CNVT_ZEB = 0x0,
    RR_RY_CNVT_ZEH = 0x1,
    RR_RY_CNVT_ZEW = 0x2,
    RR_RY_CNVT_SEB = 0x4,
    RR_RY_CNVT_SEH = 0x5,
    RR_RY_CNVT_SEW = 0x6,
};

static int xlat(int r)
{
    static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

    return map[r];
}

static void gen_mips16_save(DisasContext *ctx,
                            int xsregs, int aregs,
                            int do_ra, int do_s0, int do_s1,
                            int framesize)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    int args, astatic;

    switch (aregs) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 11:
        args = 0;
        break;
    case 4:
    case 5:
    case 6:
    case 7:
        args = 1;
        break;
    case 8:
    case 9:
    case 10:
        args = 2;
        break;
    case 12:
    case 13:
        args = 3;
        break;
    case 14:
        args = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    switch (args) {
    case 4:
        gen_base_offset_addr(ctx, t0, 29, 12);
        gen_load_gpr(t1, 7);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
                           ctx->default_tcg_memop_mask);
        /* Fall through */
    case 3:
        gen_base_offset_addr(ctx, t0, 29, 8);
        gen_load_gpr(t1, 6);
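        /* a2 (GPR 6) is stored at sp + 8; a1 and a0 follow via fall through */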
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
                           ctx->default_tcg_memop_mask);
        /* Fall through */
    case 2:
        gen_base_offset_addr(ctx, t0, 29, 4);
        gen_load_gpr(t1, 5);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
                           ctx->default_tcg_memop_mask);
        /* Fall through */
    case 1:
        gen_base_offset_addr(ctx, t0, 29, 0);
        gen_load_gpr(t1, 4);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
                           ctx->default_tcg_memop_mask);
    }

    gen_load_gpr(t0, 29);

#define DECR_AND_STORE(reg) do {                                 \
        tcg_gen_movi_tl(t2, -4);                                 \
        gen_op_addr_add(ctx, t0, t0, t2);                        \
        gen_load_gpr(t1, reg);                                   \
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |       \
                           ctx->default_tcg_memop_mask);         \
    } while (0)

    if (do_ra) {
        DECR_AND_STORE(31);
    }

    switch (xsregs) {
    case 7:
        DECR_AND_STORE(30);
        /* Fall through */
    case 6:
        DECR_AND_STORE(23);
        /* Fall through */
    case 5:
        DECR_AND_STORE(22);
        /* Fall through */
    case 4:
        DECR_AND_STORE(21);
        /* Fall through */
    case 3:
        DECR_AND_STORE(20);
        /* Fall through */
    case 2:
        DECR_AND_STORE(19);
        /* Fall through */
    case 1:
        DECR_AND_STORE(18);
    }

    if (do_s1) {
        DECR_AND_STORE(17);
    }
    if (do_s0) {
        DECR_AND_STORE(16);
    }

    switch (aregs) {
    case 0:
    case 4:
    case 8:
    case 12:
    case 14:
        astatic = 0;
        break;
    case 1:
    case 5:
    case 9:
    case 13:
        astatic = 1;
        break;
    case 2:
    case 6:
    case 10:
        astatic = 2;
        break;
    case 3:
    case 7:
        astatic = 3;
        break;
    case 11:
        astatic = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    if (astatic > 0) {
        DECR_AND_STORE(7);
        if (astatic > 1) {
            DECR_AND_STORE(6);
            if (astatic > 2) {
                DECR_AND_STORE(5);
                if (astatic > 3) {
                    DECR_AND_STORE(4);
                }
            }
        }
    }
#undef DECR_AND_STORE

    tcg_gen_movi_tl(t2, -framesize);
    gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
}

static void gen_mips16_restore(DisasContext *ctx,
                               int xsregs, int aregs,
                               int do_ra, int do_s0, int do_s1,
                               int framesize)
{
    int astatic;
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    tcg_gen_movi_tl(t2, framesize);
    gen_op_addr_add(ctx, t0, cpu_gpr[29], t2);

#define DECR_AND_LOAD(reg) do {                            \
        tcg_gen_movi_tl(t2, -4);                           \
        gen_op_addr_add(ctx, t0, t0, t2);                  \
        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | \
                           ctx->default_tcg_memop_mask);   \
        gen_store_gpr(t1, reg);                            \
    } while (0)

    if (do_ra) {
        DECR_AND_LOAD(31);
    }

    switch (xsregs) {
    case 7:
        DECR_AND_LOAD(30);
        /* Fall through */
    case 6:
        DECR_AND_LOAD(23);
        /* Fall through */
    case 5:
        DECR_AND_LOAD(22);
        /* Fall through */
    case 4:
        DECR_AND_LOAD(21);
        /* Fall through */
    case 3:
        DECR_AND_LOAD(20);
        /* Fall through */
    case 2:
        DECR_AND_LOAD(19);
        /* Fall through */
    case 1:
        DECR_AND_LOAD(18);
    }

    if (do_s1) {
        DECR_AND_LOAD(17);
    }
    if (do_s0) {
        DECR_AND_LOAD(16);
    }

    switch (aregs) {
    case 0:
    case 4:
    case 8:
    case 12:
    case 14:
        astatic = 0;
        break;
    case 1:
    case 5:
    case 9:
    case 13:
        astatic = 1;
        break;
    case 2:
    case 6:
    case 10:
        astatic = 2;
        break;
    case 3:
    case 7:
        astatic = 3;
        break;
    case 11:
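        /* aregs == 11: all four argument registers a0-a3 are treated as static */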
        astatic = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    if (astatic > 0) {
        DECR_AND_LOAD(7);
        if (astatic > 1) {
            DECR_AND_LOAD(6);
            if (astatic > 2) {
                DECR_AND_LOAD(5);
                if (astatic > 3) {
                    DECR_AND_LOAD(4);
                }
            }
        }
    }
#undef DECR_AND_LOAD

    tcg_gen_movi_tl(t2, framesize);
    gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
}

#if defined(TARGET_MIPS64)
static void decode_i64_mips16(DisasContext *ctx,
                              int ry, int funct, int16_t offset,
                              int extended)
{
    switch (funct) {
    case I64_LDSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 3;
        gen_ld(ctx, OPC_LD, ry, 29, offset);
        break;
    case I64_SDSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 3;
        gen_st(ctx, OPC_SD, ry, 29, offset);
        break;
    case I64_SDRASP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : (ctx->opcode & 0xff) << 3;
        gen_st(ctx, OPC_SD, 31, 29, offset);
        break;
    case I64_DADJSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : ((int8_t)ctx->opcode) << 3;
        gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset);
        break;
    case I64_LDPC:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
            gen_reserved_instruction(ctx);
        } else {
            offset = extended ? offset : offset << 3;
            gen_ld(ctx, OPC_LDPC, ry, 0, offset);
        }
        break;
    case I64_DADDIU5:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : ((int8_t)(offset << 3)) >> 3;
        gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset);
        break;
    case I64_DADDIUPC:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 2;
        gen_addiupc(ctx, ry, offset, 1, extended);
        break;
    case I64_DADDIUSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 2;
        gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset);
        break;
    }
}
#endif

static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx)
{
    int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
    int op, rx, ry, funct, sa;
    int16_t imm, offset;

    ctx->opcode = (ctx->opcode << 16) | extend;
    op = (ctx->opcode >> 11) & 0x1f;
    sa = (ctx->opcode >> 22) & 0x1f;
    funct = (ctx->opcode >> 8) & 0x7;
    rx = xlat((ctx->opcode >> 8) & 0x7);
    ry = xlat((ctx->opcode >> 5) & 0x7);
    offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
                              | ((ctx->opcode >> 21) & 0x3f) << 5
                              | (ctx->opcode & 0x1f));

    /*
     * The extended opcodes cleverly reuse the opcodes from their 16-bit
     * counterparts.
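     * The 16-bit immediate used below is assembled above from the EXTEND
     * prefix halfword (imm[15:11] and imm[10:5]) and the low five bits of
     * the base instruction (imm[4:0]).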
     */
    switch (op) {
    case M16_OPC_ADDIUSP:
        gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
        break;
    case M16_OPC_ADDIUPC:
        gen_addiupc(ctx, rx, imm, 0, 1);
        break;
    case M16_OPC_B:
        gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BEQZ:
        gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BNEQZ:
        gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_SHIFT:
        switch (ctx->opcode & 0x3) {
        case 0x0:
            gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
            break;
        case 0x1:
#if defined(TARGET_MIPS64)
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
#else
            gen_reserved_instruction(ctx);
#endif
            break;
        case 0x2:
            gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
            break;
        case 0x3:
            gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
            break;
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LD, ry, rx, offset);
        break;
#endif
    case M16_OPC_RRIA:
        imm = ctx->opcode & 0xf;
        imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
        imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
        imm = (int16_t) (imm << 1) >> 1;
        if ((ctx->opcode >> 4) & 0x1) {
#if defined(TARGET_MIPS64)
            check_mips_64(ctx);
            gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
#else
            gen_reserved_instruction(ctx);
#endif
        } else {
            gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
        }
        break;
    case M16_OPC_ADDIU8:
        gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
        break;
    case M16_OPC_SLTI:
        gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
        break;
    case M16_OPC_SLTIU:
        gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
        break;
    case M16_OPC_I8:
        switch (funct) {
        case I8_BTEQZ:
            gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0);
            break;
        case I8_BTNEZ:
            gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0);
            break;
        case I8_SWRASP:
            gen_st(ctx, OPC_SW, 31, 29, imm);
            break;
        case I8_ADJSP:
            gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm);
            break;
        case I8_SVRS:
            check_insn(ctx, ISA_MIPS_R1);
            {
                int xsregs = (ctx->opcode >> 24) & 0x7;
                int aregs = (ctx->opcode >> 16) & 0xf;
                int do_ra = (ctx->opcode >> 6) & 0x1;
                int do_s0 = (ctx->opcode >> 5) & 0x1;
                int do_s1 = (ctx->opcode >> 4) & 0x1;
                int framesize = (((ctx->opcode >> 20) & 0xf) << 4
                                 | (ctx->opcode & 0xf)) << 3;

                if (ctx->opcode & (1 << 7)) {
                    gen_mips16_save(ctx, xsregs, aregs,
                                    do_ra, do_s0, do_s1,
                                    framesize);
                } else {
                    gen_mips16_restore(ctx, xsregs, aregs,
                                       do_ra, do_s0, do_s1,
                                       framesize);
                }
            }
            break;
        default:
            gen_reserved_instruction(ctx);
            break;
        }
        break;
    case M16_OPC_LI:
        tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
        break;
    case M16_OPC_CMPI:
        tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_SD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_st(ctx, OPC_SD, ry, rx, offset);
        break;
#endif
    case M16_OPC_LB:
        gen_ld(ctx, OPC_LB, ry, rx, offset);
        break;
    case M16_OPC_LH:
        gen_ld(ctx, OPC_LH, ry, rx, offset);
        break;
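    /* SP-relative forms below use GPR 29 as the implicit base; LWPC is PC-relative */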
    case M16_OPC_LWSP:
        gen_ld(ctx, OPC_LW, rx, 29, offset);
        break;
    case M16_OPC_LW:
        gen_ld(ctx, OPC_LW, ry, rx, offset);
        break;
    case M16_OPC_LBU:
        gen_ld(ctx, OPC_LBU, ry, rx, offset);
        break;
    case M16_OPC_LHU:
        gen_ld(ctx, OPC_LHU, ry, rx, offset);
        break;
    case M16_OPC_LWPC:
        gen_ld(ctx, OPC_LWPC, rx, 0, offset);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LWU:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LWU, ry, rx, offset);
        break;
#endif
    case M16_OPC_SB:
        gen_st(ctx, OPC_SB, ry, rx, offset);
        break;
    case M16_OPC_SH:
        gen_st(ctx, OPC_SH, ry, rx, offset);
        break;
    case M16_OPC_SWSP:
        gen_st(ctx, OPC_SW, rx, 29, offset);
        break;
    case M16_OPC_SW:
        gen_st(ctx, OPC_SW, ry, rx, offset);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_I64:
        decode_i64_mips16(ctx, ry, funct, offset, 1);
        break;
#endif
    default:
        gen_reserved_instruction(ctx);
        break;
    }

    return 4;
}

static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx)
{
    int rx, ry;
    int sa;
    int op, cnvt_op, op1, offset;
    int funct;
    int n_bytes;

    op = (ctx->opcode >> 11) & 0x1f;
    sa = (ctx->opcode >> 2) & 0x7;
    sa = sa == 0 ? 8 : sa;
    rx = xlat((ctx->opcode >> 8) & 0x7);
    cnvt_op = (ctx->opcode >> 5) & 0x7;
    ry = xlat((ctx->opcode >> 5) & 0x7);
    op1 = offset = ctx->opcode & 0x1f;

    n_bytes = 2;

    switch (op) {
    case M16_OPC_ADDIUSP:
        {
            int16_t imm = ((uint8_t) ctx->opcode) << 2;

            gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
        }
        break;
    case M16_OPC_ADDIUPC:
        gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0);
        break;
    case M16_OPC_B:
        offset = (ctx->opcode & 0x7ff) << 1;
        offset = (int16_t)(offset << 4) >> 4;
        gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_JAL:
        offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
        offset = (((ctx->opcode & 0x1f) << 21)
                  | ((ctx->opcode >> 5) & 0x1f) << 16
                  | offset) << 2;
        op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
        gen_compute_branch(ctx, op, 4, rx, ry, offset, 2);
        n_bytes = 4;
        break;
    case M16_OPC_BEQZ:
        gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0,
                           ((int8_t)ctx->opcode) << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BNEQZ:
        gen_compute_branch(ctx, OPC_BNE, 2, rx, 0,
                           ((int8_t)ctx->opcode) << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_SHIFT:
        switch (ctx->opcode & 0x3) {
        case 0x0:
            gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
            break;
        case 0x1:
#if defined(TARGET_MIPS64)
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
#else
            gen_reserved_instruction(ctx);
#endif
            break;
        case 0x2:
            gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
            break;
        case 0x3:
            gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
            break;
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LD, ry, rx, offset << 3);
        break;
#endif
    case M16_OPC_RRIA:
        {
            int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;

            if ((ctx->opcode >> 4) & 1) {
#if defined(TARGET_MIPS64)
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
#else
                gen_reserved_instruction(ctx);
#endif
            } else {
                gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
            }
        }
        break;
    case M16_OPC_ADDIU8:
        {
            int16_t imm = (int8_t) ctx->opcode;

            gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
        }
        break;
    case M16_OPC_SLTI:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
        }
        break;
    case M16_OPC_SLTIU:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
        }
        break;
    case M16_OPC_I8:
        {
            int reg32;

            funct = (ctx->opcode >> 8) & 0x7;
            switch (funct) {
            case I8_BTEQZ:
                gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
                                   ((int8_t)ctx->opcode) << 1, 0);
                break;
            case I8_BTNEZ:
                gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
                                   ((int8_t)ctx->opcode) << 1, 0);
                break;
            case I8_SWRASP:
                gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
                break;
            case I8_ADJSP:
                gen_arith_imm(ctx, OPC_ADDIU, 29, 29,
                              ((int8_t)ctx->opcode) << 3);
                break;
            case I8_SVRS:
                check_insn(ctx, ISA_MIPS_R1);
                {
                    int do_ra = ctx->opcode & (1 << 6);
                    int do_s0 = ctx->opcode & (1 << 5);
                    int do_s1 = ctx->opcode & (1 << 4);
                    int framesize = ctx->opcode & 0xf;

                    if (framesize == 0) {
                        framesize = 128;
                    } else {
                        framesize = framesize << 3;
                    }

                    if (ctx->opcode & (1 << 7)) {
                        gen_mips16_save(ctx, 0, 0,
                                        do_ra, do_s0, do_s1, framesize);
                    } else {
                        gen_mips16_restore(ctx, 0, 0,
                                           do_ra, do_s0, do_s1, framesize);
                    }
                }
                break;
            case I8_MOV32R:
                {
                    int rz = xlat(ctx->opcode & 0x7);

                    reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
                            ((ctx->opcode >> 5) & 0x7);
                    gen_arith(ctx, OPC_ADDU, reg32, rz, 0);
                }
                break;
            case I8_MOVR32:
                reg32 = ctx->opcode & 0x1f;
                gen_arith(ctx, OPC_ADDU, ry, reg32, 0);
                break;
            default:
                gen_reserved_instruction(ctx);
                break;
            }
        }
        break;
    case M16_OPC_LI:
        {
            int16_t imm = (uint8_t) ctx->opcode;

            gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm);
        }
        break;
    case M16_OPC_CMPI:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_logic_imm(ctx, OPC_XORI, 24, rx, imm);
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_SD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_st(ctx, OPC_SD, ry, rx, offset << 3);
        break;
#endif
    case M16_OPC_LB:
        gen_ld(ctx, OPC_LB, ry, rx, offset);
        break;
    case M16_OPC_LH:
        gen_ld(ctx, OPC_LH, ry, rx, offset << 1);
        break;
    case M16_OPC_LWSP:
        gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
        break;
    case M16_OPC_LW:
        gen_ld(ctx, OPC_LW, ry, rx, offset << 2);
        break;
    case M16_OPC_LBU:
        gen_ld(ctx, OPC_LBU, ry, rx, offset);
        break;
    case M16_OPC_LHU:
        gen_ld(ctx, OPC_LHU, ry, rx, offset << 1);
        break;
    case M16_OPC_LWPC:
        gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LWU:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LWU, ry, rx, offset << 2);
        break;
#endif
    case M16_OPC_SB:
        gen_st(ctx, OPC_SB, ry, rx, offset);
        break;
    case M16_OPC_SH:
        gen_st(ctx, OPC_SH, ry, rx, offset << 1);
        break;
    case M16_OPC_SWSP:
        gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
        break;
    case M16_OPC_SW:
        gen_st(ctx, OPC_SW, ry, rx, offset << 2);
        break;
    case M16_OPC_RRR:
        {
            int rz = xlat((ctx->opcode >> 2) & 0x7);
            int mips32_op;

            switch (ctx->opcode & 0x3) {
            case RRR_ADDU:
                mips32_op = OPC_ADDU;
                break;
            case RRR_SUBU:
                mips32_op = OPC_SUBU;
                break;
#if defined(TARGET_MIPS64)
            case RRR_DADDU:
                mips32_op = OPC_DADDU;
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                break;
            case RRR_DSUBU:
                mips32_op = OPC_DSUBU;
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                break;
#endif
            default:
                gen_reserved_instruction(ctx);
                goto done;
            }

            gen_arith(ctx, mips32_op, rz, rx, ry);
        done:
            ;
        }
        break;
    case M16_OPC_RR:
        switch (op1) {
        case RR_JR:
            {
                int nd = (ctx->opcode >> 7) & 0x1;
                int link = (ctx->opcode >> 6) & 0x1;
                int ra = (ctx->opcode >> 5) & 0x1;

                if (nd) {
                    check_insn(ctx, ISA_MIPS_R1);
                }

                if (link) {
                    op = OPC_JALR;
                } else {
                    op = OPC_JR;
                }

                gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0,
                                   (nd ? 0 : 2));
            }
            break;
        case RR_SDBBP:
            if (is_uhi(ctx, extract32(ctx->opcode, 5, 6))) {
                ctx->base.is_jmp = DISAS_SEMIHOST;
            } else {
                /*
                 * XXX: not clear which exception should be raised
                 *      when in debug mode...
                 */
                check_insn(ctx, ISA_MIPS_R1);
                generate_exception_end(ctx, EXCP_DBp);
            }
            break;
        case RR_SLT:
            gen_slt(ctx, OPC_SLT, 24, rx, ry);
            break;
        case RR_SLTU:
            gen_slt(ctx, OPC_SLTU, 24, rx, ry);
            break;
        case RR_BREAK:
            generate_exception_break(ctx, extract32(ctx->opcode, 5, 6));
            break;
        case RR_SLLV:
            gen_shift(ctx, OPC_SLLV, ry, rx, ry);
            break;
        case RR_SRLV:
            gen_shift(ctx, OPC_SRLV, ry, rx, ry);
            break;
        case RR_SRAV:
            gen_shift(ctx, OPC_SRAV, ry, rx, ry);
            break;
#if defined(TARGET_MIPS64)
        case RR_DSRL:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa);
            break;
#endif
        case RR_CMP:
            gen_logic(ctx, OPC_XOR, 24, rx, ry);
            break;
        case RR_NEG:
            gen_arith(ctx, OPC_SUBU, rx, 0, ry);
            break;
        case RR_AND:
            gen_logic(ctx, OPC_AND, rx, rx, ry);
            break;
        case RR_OR:
            gen_logic(ctx, OPC_OR, rx, rx, ry);
            break;
        case RR_XOR:
            gen_logic(ctx, OPC_XOR, rx, rx, ry);
            break;
        case RR_NOT:
            gen_logic(ctx, OPC_NOR, rx, ry, 0);
            break;
        case RR_MFHI:
            gen_HILO(ctx, OPC_MFHI, 0, rx);
            break;
        case RR_CNVT:
            check_insn(ctx, ISA_MIPS_R1);
            switch (cnvt_op) {
            case RR_RY_CNVT_ZEB:
                tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_ZEH:
                tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEB:
                tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEH:
                tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
#if defined(TARGET_MIPS64)
            case RR_RY_CNVT_ZEW:
                check_insn(ctx, ISA_MIPS_R1);
                check_mips_64(ctx);
                tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEW:
                check_insn(ctx, ISA_MIPS_R1);
                check_mips_64(ctx);
                tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
#endif
            default:
                gen_reserved_instruction(ctx);
                break;
            }
            break;
        case RR_MFLO:
            gen_HILO(ctx, OPC_MFLO, 0, rx);
            break;
#if defined(TARGET_MIPS64)
        case RR_DSRA:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa);
            break;
        case RR_DSLLV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSLLV, ry, rx, ry);
            break;
        case RR_DSRLV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSRLV, ry, rx, ry);
            break;
        case RR_DSRAV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSRAV, ry, rx, ry);
            break;
#endif
        case RR_MULT:
            gen_muldiv(ctx, OPC_MULT, 0, rx, ry);
            break;
        case RR_MULTU:
            gen_muldiv(ctx, OPC_MULTU, 0, rx, ry);
            break;
        case RR_DIV:
            gen_muldiv(ctx, OPC_DIV, 0, rx, ry);
            break;
        case RR_DIVU:
            gen_muldiv(ctx, OPC_DIVU, 0, rx, ry);
            break;
#if defined(TARGET_MIPS64)
        case RR_DMULT:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DMULT, 0, rx, ry);
            break;
        case RR_DMULTU:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry);
            break;
        case RR_DDIV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DDIV, 0, rx, ry);
            break;
        case RR_DDIVU:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry);
            break;
#endif
        default:
            gen_reserved_instruction(ctx);
            break;
        }
        break;
    case M16_OPC_EXTEND:
        decode_extended_mips16_opc(env, ctx);
        n_bytes = 4;
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_I64:
        funct = (ctx->opcode >> 8) & 0x7;
        decode_i64_mips16(ctx, ry, funct, offset, 0);
        break;
#endif
    default:
        gen_reserved_instruction(ctx);
        break;
    }

    return n_bytes;
}