/*
 * MIPS16 extension (Code Compaction) ASE translation routines
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 * Copyright (c) 2006 Marius Groeger (FPU operations)
 * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
 * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

/* MIPS16 major opcodes */
enum {
    M16_OPC_ADDIUSP = 0x00,
    M16_OPC_ADDIUPC = 0x01,
    M16_OPC_B = 0x02,
    M16_OPC_JAL = 0x03,
    M16_OPC_BEQZ = 0x04,
    M16_OPC_BNEQZ = 0x05,
    M16_OPC_SHIFT = 0x06,
    M16_OPC_LD = 0x07,
    M16_OPC_RRIA = 0x08,
    M16_OPC_ADDIU8 = 0x09,
    M16_OPC_SLTI = 0x0a,
    M16_OPC_SLTIU = 0x0b,
    M16_OPC_I8 = 0x0c,
    M16_OPC_LI = 0x0d,
    M16_OPC_CMPI = 0x0e,
    M16_OPC_SD = 0x0f,
    M16_OPC_LB = 0x10,
    M16_OPC_LH = 0x11,
    M16_OPC_LWSP = 0x12,
    M16_OPC_LW = 0x13,
    M16_OPC_LBU = 0x14,
    M16_OPC_LHU = 0x15,
    M16_OPC_LWPC = 0x16,
    M16_OPC_LWU = 0x17,
    M16_OPC_SB = 0x18,
    M16_OPC_SH = 0x19,
    M16_OPC_SWSP = 0x1a,
    M16_OPC_SW = 0x1b,
    M16_OPC_RRR = 0x1c,
    M16_OPC_RR = 0x1d,
    M16_OPC_EXTEND = 0x1e,
    M16_OPC_I64 = 0x1f
};

/* I8 funct field */
enum {
    I8_BTEQZ = 0x0,
    I8_BTNEZ = 0x1,
    I8_SWRASP = 0x2,
    I8_ADJSP = 0x3,
    I8_SVRS = 0x4,
    I8_MOV32R = 0x5,
    I8_MOVR32 = 0x7
};

/* RRR f field */
enum {
    RRR_DADDU = 0x0,
    RRR_ADDU = 0x1,
    RRR_DSUBU = 0x2,
    RRR_SUBU = 0x3
};

/* RR funct field */
enum {
    RR_JR = 0x00,
    RR_SDBBP = 0x01,
    RR_SLT = 0x02,
    RR_SLTU = 0x03,
    RR_SLLV = 0x04,
    RR_BREAK = 0x05,
    RR_SRLV = 0x06,
    RR_SRAV = 0x07,
    RR_DSRL = 0x08,
    RR_CMP = 0x0a,
    RR_NEG = 0x0b,
    RR_AND = 0x0c,
    RR_OR = 0x0d,
    RR_XOR = 0x0e,
    RR_NOT = 0x0f,
    RR_MFHI = 0x10,
    RR_CNVT = 0x11,
    RR_MFLO = 0x12,
    RR_DSRA = 0x13,
    RR_DSLLV = 0x14,
    RR_DSRLV = 0x16,
    RR_DSRAV = 0x17,
    RR_MULT = 0x18,
    RR_MULTU = 0x19,
    RR_DIV = 0x1a,
    RR_DIVU = 0x1b,
    RR_DMULT = 0x1c,
    RR_DMULTU = 0x1d,
    RR_DDIV = 0x1e,
    RR_DDIVU = 0x1f
};

/* I64 funct field */
enum {
    I64_LDSP = 0x0,
    I64_SDSP = 0x1,
    I64_SDRASP = 0x2,
    I64_DADJSP = 0x3,
    I64_LDPC = 0x4,
    I64_DADDIU5 = 0x5,
    I64_DADDIUPC = 0x6,
    I64_DADDIUSP = 0x7
};

/* RR ry field for CNVT */
enum {
    RR_RY_CNVT_ZEB = 0x0,
    RR_RY_CNVT_ZEH = 0x1,
    RR_RY_CNVT_ZEW = 0x2,
    RR_RY_CNVT_SEB = 0x4,
    RR_RY_CNVT_SEH = 0x5,
    RR_RY_CNVT_SEW = 0x6,
};

/*
 * Map a MIPS16 3-bit register field onto the full MIPS GPR number:
 * encodings 0-7 select $16, $17, $2, $3, $4, $5, $6, $7
 * (i.e. s0, s1, v0, v1, a0-a3).
 */
static int xlat(int r)
{
    static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

    return map[r];
}

/*
 * MIPS16e SAVE: store the argument registers selected by 'aregs' at
 * SP+0..SP+12, push RA, the static registers selected by 'xsregs'/'do_*'
 * and any static argument registers below SP, then move SP down by
 * 'framesize' bytes.
 */
static void gen_mips16_save(DisasContext *ctx,
                            int xsregs, int aregs,
                            int do_ra, int do_s0, int do_s1,
                            int framesize)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    int args, astatic;

    switch (aregs) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 11:
        args = 0;
        break;
    case 4:
    case 5:
    case 6:
    case 7:
        args = 1;
        break;
    case 8:
    case 9:
    case 10:
        args = 2;
        break;
    case 12:
    case 13:
        args = 3;
        break;
    case 14:
        args = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    switch (args) {
    case 4:
        gen_base_offset_addr(ctx, t0, 29, 12);
        gen_load_gpr(t1, 7);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
        /* Fall through */
    case 3:
        gen_base_offset_addr(ctx, t0, 29, 8);
        gen_load_gpr(t1, 6);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
        /* Fall through */
    case 2:
        gen_base_offset_addr(ctx, t0, 29, 4);
        gen_load_gpr(t1, 5);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
        /* Fall through */
    case 1:
        gen_base_offset_addr(ctx, t0, 29, 0);
        gen_load_gpr(t1, 4);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
    }

    gen_load_gpr(t0, 29);

#define DECR_AND_STORE(reg) do {                                 \
        tcg_gen_movi_tl(t2, -4);                                 \
        gen_op_addr_add(ctx, t0, t0, t2);                        \
        gen_load_gpr(t1, reg);                                   \
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);       \
    } while (0)

    if (do_ra) {
        DECR_AND_STORE(31);
    }

    switch (xsregs) {
    case 7:
        DECR_AND_STORE(30);
        /* Fall through */
    case 6:
        DECR_AND_STORE(23);
        /* Fall through */
    case 5:
        DECR_AND_STORE(22);
        /* Fall through */
    case 4:
        DECR_AND_STORE(21);
        /* Fall through */
    case 3:
        DECR_AND_STORE(20);
        /* Fall through */
    case 2:
        DECR_AND_STORE(19);
        /* Fall through */
    case 1:
        DECR_AND_STORE(18);
    }

    if (do_s1) {
        DECR_AND_STORE(17);
    }
    if (do_s0) {
        DECR_AND_STORE(16);
    }

    switch (aregs) {
    case 0:
    case 4:
    case 8:
    case 12:
    case 14:
        astatic = 0;
        break;
    case 1:
    case 5:
    case 9:
    case 13:
        astatic = 1;
        break;
    case 2:
    case 6:
    case 10:
        astatic = 2;
        break;
    case 3:
    case 7:
        astatic = 3;
        break;
    case 11:
        astatic = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    if (astatic > 0) {
        DECR_AND_STORE(7);
        if (astatic > 1) {
            DECR_AND_STORE(6);
            if (astatic > 2) {
                DECR_AND_STORE(5);
                if (astatic > 3) {
                    DECR_AND_STORE(4);
                }
            }
        }
    }
#undef DECR_AND_STORE

    tcg_gen_movi_tl(t2, -framesize);
    gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
}

/*
 * MIPS16e RESTORE: reload the registers saved by SAVE, starting from the
 * top of the frame (SP + 'framesize'), then move SP back up by
 * 'framesize' bytes.
 */
static void gen_mips16_restore(DisasContext *ctx,
                               int xsregs, int aregs,
                               int do_ra, int do_s0, int do_s1,
                               int framesize)
{
    int astatic;
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    tcg_gen_movi_tl(t2, framesize);
    gen_op_addr_add(ctx, t0, cpu_gpr[29], t2);

#define DECR_AND_LOAD(reg) do {                            \
        tcg_gen_movi_tl(t2, -4);                           \
        gen_op_addr_add(ctx, t0, t0, t2);                  \
        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); \
        gen_store_gpr(t1, reg);                            \
    } while (0)

    if (do_ra) {
        DECR_AND_LOAD(31);
    }

    switch (xsregs) {
    case 7:
        DECR_AND_LOAD(30);
        /* Fall through */
    case 6:
        DECR_AND_LOAD(23);
        /* Fall through */
    case 5:
        DECR_AND_LOAD(22);
        /* Fall through */
    case 4:
        DECR_AND_LOAD(21);
        /* Fall through */
    case 3:
        DECR_AND_LOAD(20);
        /* Fall through */
    case 2:
        DECR_AND_LOAD(19);
        /* Fall through */
    case 1:
        DECR_AND_LOAD(18);
    }

    if (do_s1) {
        DECR_AND_LOAD(17);
    }
    if (do_s0) {
        DECR_AND_LOAD(16);
    }

    switch (aregs) {
    case 0:
    case 4:
    case 8:
    case 12:
    case 14:
        astatic = 0;
        break;
    case 1:
    case 5:
    case 9:
    case 13:
        astatic = 1;
        break;
    case 2:
    case 6:
    case 10:
        astatic = 2;
        break;
    case 3:
    case 7:
        astatic = 3;
        break;
    case 11:
        astatic = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    if (astatic > 0) {
        DECR_AND_LOAD(7);
        if (astatic > 1) {
            DECR_AND_LOAD(6);
            if (astatic > 2) {
                DECR_AND_LOAD(5);
                if (astatic > 3) {
                    DECR_AND_LOAD(4);
                }
            }
        }
    }
#undef DECR_AND_LOAD

    tcg_gen_movi_tl(t2, framesize);
    gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
}

#if defined(TARGET_MIPS64)
static void decode_i64_mips16(DisasContext *ctx,
                              int ry, int funct, int16_t offset,
                              int extended)
{
    switch (funct) {
    case I64_LDSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 3;
        gen_ld(ctx, OPC_LD, ry, 29, offset);
        break;
    case I64_SDSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 3;
        gen_st(ctx, OPC_SD, ry, 29, offset);
        break;
    case I64_SDRASP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : (ctx->opcode & 0xff) << 3;
        gen_st(ctx, OPC_SD, 31, 29, offset);
        break;
    case I64_DADJSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : ((int8_t)ctx->opcode) << 3;
        gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset);
        break;
    case I64_LDPC:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
            gen_reserved_instruction(ctx);
        } else {
            offset = extended ? offset : offset << 3;
            gen_ld(ctx, OPC_LDPC, ry, 0, offset);
        }
        break;
    case I64_DADDIU5:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : ((int8_t)(offset << 3)) >> 3;
        gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset);
        break;
    case I64_DADDIUPC:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 2;
        gen_addiupc(ctx, ry, offset, 1, extended);
        break;
    case I64_DADDIUSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 2;
        gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset);
        break;
    }
}
#endif

static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx)
{
    int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
    int op, rx, ry, funct, sa;
    int16_t imm, offset;

    ctx->opcode = (ctx->opcode << 16) | extend;
    op = (ctx->opcode >> 11) & 0x1f;
    sa = (ctx->opcode >> 22) & 0x1f;
    funct = (ctx->opcode >> 8) & 0x7;
    rx = xlat((ctx->opcode >> 8) & 0x7);
    ry = xlat((ctx->opcode >> 5) & 0x7);
    offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
                              | ((ctx->opcode >> 21) & 0x3f) << 5
                              | (ctx->opcode & 0x1f));

    /*
     * The extended opcodes cleverly reuse the opcodes from their 16-bit
     * counterparts.
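     * The EXTEND prefix, now held in the upper halfword of ctx->opcode,
     * contributes imm[15:11] and imm[10:5], while the base instruction
     * keeps imm[4:0]; that is why the immediate above is assembled from
     * three separate bit fields.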
     */
    switch (op) {
    case M16_OPC_ADDIUSP:
        gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
        break;
    case M16_OPC_ADDIUPC:
        gen_addiupc(ctx, rx, imm, 0, 1);
        break;
    case M16_OPC_B:
        gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BEQZ:
        gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BNEQZ:
        gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_SHIFT:
        switch (ctx->opcode & 0x3) {
        case 0x0:
            gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
            break;
        case 0x1:
#if defined(TARGET_MIPS64)
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
#else
            gen_reserved_instruction(ctx);
#endif
            break;
        case 0x2:
            gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
            break;
        case 0x3:
            gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
            break;
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LD, ry, rx, offset);
        break;
#endif
    case M16_OPC_RRIA:
        imm = ctx->opcode & 0xf;
        imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
        imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
        imm = (int16_t) (imm << 1) >> 1;
        if ((ctx->opcode >> 4) & 0x1) {
#if defined(TARGET_MIPS64)
            check_mips_64(ctx);
            gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
#else
            gen_reserved_instruction(ctx);
#endif
        } else {
            gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
        }
        break;
    case M16_OPC_ADDIU8:
        gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
        break;
    case M16_OPC_SLTI:
        gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
        break;
    case M16_OPC_SLTIU:
        gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
        break;
    case M16_OPC_I8:
        switch (funct) {
        case I8_BTEQZ:
            gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0);
            break;
        case I8_BTNEZ:
            gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0);
            break;
        case I8_SWRASP:
            gen_st(ctx, OPC_SW, 31, 29, imm);
            break;
        case I8_ADJSP:
            gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm);
            break;
        case I8_SVRS:
            check_insn(ctx, ISA_MIPS_R1);
            {
                int xsregs = (ctx->opcode >> 24) & 0x7;
                int aregs = (ctx->opcode >> 16) & 0xf;
                int do_ra = (ctx->opcode >> 6) & 0x1;
                int do_s0 = (ctx->opcode >> 5) & 0x1;
                int do_s1 = (ctx->opcode >> 4) & 0x1;
                int framesize = (((ctx->opcode >> 20) & 0xf) << 4
                                 | (ctx->opcode & 0xf)) << 3;

                if (ctx->opcode & (1 << 7)) {
                    gen_mips16_save(ctx, xsregs, aregs,
                                    do_ra, do_s0, do_s1,
                                    framesize);
                } else {
                    gen_mips16_restore(ctx, xsregs, aregs,
                                       do_ra, do_s0, do_s1,
                                       framesize);
                }
            }
            break;
        default:
            gen_reserved_instruction(ctx);
            break;
        }
        break;
    case M16_OPC_LI:
        tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
        break;
    case M16_OPC_CMPI:
        tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_SD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_st(ctx, OPC_SD, ry, rx, offset);
        break;
#endif
    case M16_OPC_LB:
        gen_ld(ctx, OPC_LB, ry, rx, offset);
        break;
    case M16_OPC_LH:
        gen_ld(ctx, OPC_LH, ry, rx, offset);
        break;
    case M16_OPC_LWSP:
        gen_ld(ctx, OPC_LW, rx, 29, offset);
        break;
    case M16_OPC_LW:
        gen_ld(ctx, OPC_LW, ry, rx, offset);
        break;
    case M16_OPC_LBU:
        gen_ld(ctx, OPC_LBU, ry, rx, offset);
        break;
    case M16_OPC_LHU:
        gen_ld(ctx, OPC_LHU, ry, rx, offset);
        break;
    case M16_OPC_LWPC:
        gen_ld(ctx, OPC_LWPC, rx, 0, offset);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LWU:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LWU, ry, rx, offset);
        break;
#endif
    case M16_OPC_SB:
        gen_st(ctx, OPC_SB, ry, rx, offset);
        break;
    case M16_OPC_SH:
        gen_st(ctx, OPC_SH, ry, rx, offset);
        break;
    case M16_OPC_SWSP:
        gen_st(ctx, OPC_SW, rx, 29, offset);
        break;
    case M16_OPC_SW:
        gen_st(ctx, OPC_SW, ry, rx, offset);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_I64:
        decode_i64_mips16(ctx, ry, funct, offset, 1);
        break;
#endif
    default:
        gen_reserved_instruction(ctx);
        break;
    }

    return 4;
}

/*
 * Decode one MIPS16e instruction; returns the number of bytes consumed
 * (2, or 4 for EXTENDed and JAL/JALX encodings).
 */
static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx)
{
    int rx, ry;
    int sa;
    int op, cnvt_op, op1, offset;
    int funct;
    int n_bytes;

    op = (ctx->opcode >> 11) & 0x1f;
    sa = (ctx->opcode >> 2) & 0x7;
    sa = sa == 0 ? 8 : sa;
    rx = xlat((ctx->opcode >> 8) & 0x7);
    cnvt_op = (ctx->opcode >> 5) & 0x7;
    ry = xlat((ctx->opcode >> 5) & 0x7);
    op1 = offset = ctx->opcode & 0x1f;

    n_bytes = 2;

    switch (op) {
    case M16_OPC_ADDIUSP:
        {
            int16_t imm = ((uint8_t) ctx->opcode) << 2;

            gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
        }
        break;
    case M16_OPC_ADDIUPC:
        gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0);
        break;
    case M16_OPC_B:
        offset = (ctx->opcode & 0x7ff) << 1;
        offset = (int16_t)(offset << 4) >> 4;
        gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_JAL:
        offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
        offset = (((ctx->opcode & 0x1f) << 21)
                  | ((ctx->opcode >> 5) & 0x1f) << 16
                  | offset) << 2;
        op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
        gen_compute_branch(ctx, op, 4, rx, ry, offset, 2);
        n_bytes = 4;
        break;
    case M16_OPC_BEQZ:
        gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0,
                           ((int8_t)ctx->opcode) << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BNEQZ:
        gen_compute_branch(ctx, OPC_BNE, 2, rx, 0,
                           ((int8_t)ctx->opcode) << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_SHIFT:
        switch (ctx->opcode & 0x3) {
        case 0x0:
            gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
            break;
        case 0x1:
#if defined(TARGET_MIPS64)
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
#else
            gen_reserved_instruction(ctx);
#endif
            break;
        case 0x2:
            gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
            break;
        case 0x3:
            gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
            break;
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LD, ry, rx, offset << 3);
        break;
#endif
    case M16_OPC_RRIA:
        {
            int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;

            if ((ctx->opcode >> 4) & 1) {
#if defined(TARGET_MIPS64)
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
#else
                gen_reserved_instruction(ctx);
#endif
            } else {
                gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
            }
        }
        break;
    case M16_OPC_ADDIU8:
        {
            int16_t imm = (int8_t) ctx->opcode;

            gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
        }
        break;
    case M16_OPC_SLTI:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
        }
        break;
    case M16_OPC_SLTIU:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
        }
        break;
    case M16_OPC_I8:
        {
            int reg32;

            funct = (ctx->opcode >> 8) & 0x7;
            switch (funct) {
            case I8_BTEQZ:
                gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
                                   ((int8_t)ctx->opcode) << 1, 0);
                break;
            case I8_BTNEZ:
                gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
                                   ((int8_t)ctx->opcode) << 1, 0);
                break;
            case I8_SWRASP:
                gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
                break;
            case I8_ADJSP:
                gen_arith_imm(ctx, OPC_ADDIU, 29, 29,
                              ((int8_t)ctx->opcode) << 3);
                break;
            case I8_SVRS:
                check_insn(ctx, ISA_MIPS_R1);
                {
                    int do_ra = ctx->opcode & (1 << 6);
                    int do_s0 = ctx->opcode & (1 << 5);
                    int do_s1 = ctx->opcode & (1 << 4);
                    int framesize = ctx->opcode & 0xf;

                    if (framesize == 0) {
                        framesize = 128;
                    } else {
                        framesize = framesize << 3;
                    }

                    if (ctx->opcode & (1 << 7)) {
                        gen_mips16_save(ctx, 0, 0,
                                        do_ra, do_s0, do_s1, framesize);
                    } else {
                        gen_mips16_restore(ctx, 0, 0,
                                           do_ra, do_s0, do_s1, framesize);
                    }
                }
                break;
            case I8_MOV32R:
                {
                    int rz = xlat(ctx->opcode & 0x7);

                    reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
                            ((ctx->opcode >> 5) & 0x7);
                    gen_arith(ctx, OPC_ADDU, reg32, rz, 0);
                }
                break;
            case I8_MOVR32:
                reg32 = ctx->opcode & 0x1f;
                gen_arith(ctx, OPC_ADDU, ry, reg32, 0);
                break;
            default:
                gen_reserved_instruction(ctx);
                break;
            }
        }
        break;
    case M16_OPC_LI:
        {
            int16_t imm = (uint8_t) ctx->opcode;

            gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm);
        }
        break;
    case M16_OPC_CMPI:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_logic_imm(ctx, OPC_XORI, 24, rx, imm);
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_SD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_st(ctx, OPC_SD, ry, rx, offset << 3);
        break;
#endif
    case M16_OPC_LB:
        gen_ld(ctx, OPC_LB, ry, rx, offset);
        break;
    case M16_OPC_LH:
        gen_ld(ctx, OPC_LH, ry, rx, offset << 1);
        break;
    case M16_OPC_LWSP:
        gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
        break;
    case M16_OPC_LW:
        gen_ld(ctx, OPC_LW, ry, rx, offset << 2);
        break;
    case M16_OPC_LBU:
        gen_ld(ctx, OPC_LBU, ry, rx, offset);
        break;
    case M16_OPC_LHU:
        gen_ld(ctx, OPC_LHU, ry, rx, offset << 1);
        break;
    case M16_OPC_LWPC:
        gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LWU:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LWU, ry, rx, offset << 2);
        break;
#endif
    case M16_OPC_SB:
        gen_st(ctx, OPC_SB, ry, rx, offset);
        break;
    case M16_OPC_SH:
        gen_st(ctx, OPC_SH, ry, rx, offset << 1);
        break;
    case M16_OPC_SWSP:
        gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
        break;
    case M16_OPC_SW:
        gen_st(ctx, OPC_SW, ry, rx, offset << 2);
        break;
    case M16_OPC_RRR:
        {
            int rz = xlat((ctx->opcode >> 2) & 0x7);
            int mips32_op;

            switch (ctx->opcode & 0x3) {
            case RRR_ADDU:
                mips32_op = OPC_ADDU;
                break;
            case RRR_SUBU:
                mips32_op = OPC_SUBU;
                break;
#if defined(TARGET_MIPS64)
            case RRR_DADDU:
                mips32_op = OPC_DADDU;
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                break;
            case RRR_DSUBU:
                mips32_op = OPC_DSUBU;
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                break;
#endif
            default:
                gen_reserved_instruction(ctx);
                goto done;
            }

            gen_arith(ctx, mips32_op, rz, rx, ry);
        done:
            ;
        }
        break;
    case M16_OPC_RR:
        switch (op1) {
        case RR_JR:
            {
                int nd = (ctx->opcode >> 7) & 0x1;
                int link = (ctx->opcode >> 6) & 0x1;
                int ra = (ctx->opcode >> 5) & 0x1;

                if (nd) {
                    check_insn(ctx, ISA_MIPS_R1);
                }

                if (link) {
                    op = OPC_JALR;
                } else {
                    op = OPC_JR;
                }

                gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0,
                                   (nd ? 0 : 2));
            }
            break;
        case RR_SDBBP:
            if (is_uhi(ctx, extract32(ctx->opcode, 5, 6))) {
                ctx->base.is_jmp = DISAS_SEMIHOST;
            } else {
                /*
                 * XXX: not clear which exception should be raised
                 *      when in debug mode...
                 */
                check_insn(ctx, ISA_MIPS_R1);
                generate_exception_end(ctx, EXCP_DBp);
            }
            break;
        case RR_SLT:
            gen_slt(ctx, OPC_SLT, 24, rx, ry);
            break;
        case RR_SLTU:
            gen_slt(ctx, OPC_SLTU, 24, rx, ry);
            break;
        case RR_BREAK:
            generate_exception_break(ctx, extract32(ctx->opcode, 5, 6));
            break;
        case RR_SLLV:
            gen_shift(ctx, OPC_SLLV, ry, rx, ry);
            break;
        case RR_SRLV:
            gen_shift(ctx, OPC_SRLV, ry, rx, ry);
            break;
        case RR_SRAV:
            gen_shift(ctx, OPC_SRAV, ry, rx, ry);
            break;
#if defined(TARGET_MIPS64)
        case RR_DSRL:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa);
            break;
#endif
        case RR_CMP:
            gen_logic(ctx, OPC_XOR, 24, rx, ry);
            break;
        case RR_NEG:
            gen_arith(ctx, OPC_SUBU, rx, 0, ry);
            break;
        case RR_AND:
            gen_logic(ctx, OPC_AND, rx, rx, ry);
            break;
        case RR_OR:
            gen_logic(ctx, OPC_OR, rx, rx, ry);
            break;
        case RR_XOR:
            gen_logic(ctx, OPC_XOR, rx, rx, ry);
            break;
        case RR_NOT:
            gen_logic(ctx, OPC_NOR, rx, ry, 0);
            break;
        case RR_MFHI:
            gen_HILO(ctx, OPC_MFHI, 0, rx);
            break;
        case RR_CNVT:
            check_insn(ctx, ISA_MIPS_R1);
            switch (cnvt_op) {
            case RR_RY_CNVT_ZEB:
                tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_ZEH:
                tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEB:
                tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEH:
                tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
#if defined(TARGET_MIPS64)
            case RR_RY_CNVT_ZEW:
                check_insn(ctx, ISA_MIPS_R1);
                check_mips_64(ctx);
                tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEW:
                check_insn(ctx, ISA_MIPS_R1);
                check_mips_64(ctx);
                tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
#endif
            default:
                gen_reserved_instruction(ctx);
                break;
            }
            break;
        case RR_MFLO:
            gen_HILO(ctx, OPC_MFLO, 0, rx);
            break;
#if defined(TARGET_MIPS64)
        case RR_DSRA:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa);
            break;
        case RR_DSLLV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSLLV, ry, rx, ry);
            break;
        case RR_DSRLV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSRLV, ry, rx, ry);
            break;
        case RR_DSRAV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSRAV, ry, rx, ry);
            break;
#endif
        case RR_MULT:
            gen_muldiv(ctx, OPC_MULT, 0, rx, ry);
            break;
        case RR_MULTU:
            gen_muldiv(ctx, OPC_MULTU, 0, rx, ry);
            break;
        case RR_DIV:
            gen_muldiv(ctx, OPC_DIV, 0, rx, ry);
            break;
        case RR_DIVU:
            gen_muldiv(ctx, OPC_DIVU, 0, rx, ry);
            break;
#if defined(TARGET_MIPS64)
        case RR_DMULT:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DMULT, 0, rx, ry);
            break;
        case RR_DMULTU:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry);
            break;
        case RR_DDIV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DDIV, 0, rx, ry);
            break;
        case RR_DDIVU:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry);
            break;
#endif
        default:
            gen_reserved_instruction(ctx);
            break;
        }
        break;
    case M16_OPC_EXTEND:
        decode_extended_mips16_opc(env, ctx);
        n_bytes = 4;
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_I64:
        funct = (ctx->opcode >> 8) & 0x7;
        decode_i64_mips16(ctx, ry, funct, offset, 0);
        break;
#endif
    default:
        gen_reserved_instruction(ctx);
        break;
    }

    return n_bytes;
}