/*
 * MIPS16 extension (Code Compaction) ASE translation routines
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 * Copyright (c) 2006 Marius Groeger (FPU operations)
 * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
 * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

/* MIPS16 major opcodes */
enum {
    M16_OPC_ADDIUSP = 0x00,
    M16_OPC_ADDIUPC = 0x01,
    M16_OPC_B = 0x02,
    M16_OPC_JAL = 0x03,
    M16_OPC_BEQZ = 0x04,
    M16_OPC_BNEQZ = 0x05,
    M16_OPC_SHIFT = 0x06,
    M16_OPC_LD = 0x07,
    M16_OPC_RRIA = 0x08,
    M16_OPC_ADDIU8 = 0x09,
    M16_OPC_SLTI = 0x0a,
    M16_OPC_SLTIU = 0x0b,
    M16_OPC_I8 = 0x0c,
    M16_OPC_LI = 0x0d,
    M16_OPC_CMPI = 0x0e,
    M16_OPC_SD = 0x0f,
    M16_OPC_LB = 0x10,
    M16_OPC_LH = 0x11,
    M16_OPC_LWSP = 0x12,
    M16_OPC_LW = 0x13,
    M16_OPC_LBU = 0x14,
    M16_OPC_LHU = 0x15,
    M16_OPC_LWPC = 0x16,
    M16_OPC_LWU = 0x17,
    M16_OPC_SB = 0x18,
    M16_OPC_SH = 0x19,
    M16_OPC_SWSP = 0x1a,
    M16_OPC_SW = 0x1b,
    M16_OPC_RRR = 0x1c,
    M16_OPC_RR = 0x1d,
    M16_OPC_EXTEND = 0x1e,
    M16_OPC_I64 = 0x1f
};

/* I8 funct field */
enum {
    I8_BTEQZ = 0x0,
    I8_BTNEZ = 0x1,
    I8_SWRASP = 0x2,
    I8_ADJSP = 0x3,
    I8_SVRS = 0x4,
    I8_MOV32R = 0x5,
    I8_MOVR32 = 0x7
};

/* RRR f field */
enum {
    RRR_DADDU = 0x0,
    RRR_ADDU = 0x1,
    RRR_DSUBU = 0x2,
    RRR_SUBU = 0x3
};

/* RR funct field */
enum {
    RR_JR = 0x00,
    RR_SDBBP = 0x01,
    RR_SLT = 0x02,
    RR_SLTU = 0x03,
    RR_SLLV = 0x04,
    RR_BREAK = 0x05,
    RR_SRLV = 0x06,
    RR_SRAV = 0x07,
    RR_DSRL = 0x08,
    RR_CMP = 0x0a,
    RR_NEG = 0x0b,
    RR_AND = 0x0c,
    RR_OR = 0x0d,
    RR_XOR = 0x0e,
    RR_NOT = 0x0f,
    RR_MFHI = 0x10,
    RR_CNVT = 0x11,
    RR_MFLO = 0x12,
    RR_DSRA = 0x13,
    RR_DSLLV = 0x14,
    RR_DSRLV = 0x16,
    RR_DSRAV = 0x17,
    RR_MULT = 0x18,
    RR_MULTU = 0x19,
    RR_DIV = 0x1a,
    RR_DIVU = 0x1b,
    RR_DMULT = 0x1c,
    RR_DMULTU = 0x1d,
    RR_DDIV = 0x1e,
    RR_DDIVU = 0x1f
};

/* I64 funct field */
enum {
    I64_LDSP = 0x0,
    I64_SDSP = 0x1,
    I64_SDRASP = 0x2,
    I64_DADJSP = 0x3,
    I64_LDPC = 0x4,
    I64_DADDIU5 = 0x5,
    I64_DADDIUPC = 0x6,
    I64_DADDIUSP = 0x7
};

/* RR ry field for CNVT */
enum {
    RR_RY_CNVT_ZEB = 0x0,
    RR_RY_CNVT_ZEH = 0x1,
    RR_RY_CNVT_ZEW = 0x2,
    RR_RY_CNVT_SEB = 0x4,
    RR_RY_CNVT_SEH = 0x5,
    RR_RY_CNVT_SEW = 0x6,
};

static int xlat(int r)
{
    static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

    return map[r];
}

static void decr_and_store(DisasContext *ctx, unsigned regidx, TCGv t0)
{
    TCGv t1 = tcg_temp_new();

    gen_op_addr_addi(ctx, t0, t0, -4);
    gen_load_gpr(t1, regidx);
    tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
                       ctx->default_tcg_memop_mask);
}

static void gen_mips16_save(DisasContext *ctx,
                            int xsregs, int aregs,
                            int do_ra, int do_s0, int do_s1,
                            int framesize)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int args, astatic;

    switch (aregs) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 11:
        args = 0;
        break;
    case 4:
    case 5:
    case 6:
    case 7:
        args = 1;
        break;
    case 8:
    case 9:
    case 10:
        args = 2;
        break;
    case 12:
    case 13:
        args = 3;
        break;
    case 14:
        args = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    switch (args) {
    case 4:
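        /*
         * Store the requested argument registers ($4..$7) at sp+0..sp+12,
         * relative to the incoming stack pointer; each case falls through
         * so the lower-numbered argument registers are stored as well.
         */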
        gen_base_offset_addr(ctx, t0, 29, 12);
        gen_load_gpr(t1, 7);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
                           ctx->default_tcg_memop_mask);
        /* Fall through */
    case 3:
        gen_base_offset_addr(ctx, t0, 29, 8);
        gen_load_gpr(t1, 6);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
                           ctx->default_tcg_memop_mask);
        /* Fall through */
    case 2:
        gen_base_offset_addr(ctx, t0, 29, 4);
        gen_load_gpr(t1, 5);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
                           ctx->default_tcg_memop_mask);
        /* Fall through */
    case 1:
        gen_base_offset_addr(ctx, t0, 29, 0);
        gen_load_gpr(t1, 4);
        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
                           ctx->default_tcg_memop_mask);
    }

    gen_load_gpr(t0, 29);

    if (do_ra) {
        decr_and_store(ctx, 31, t0);
    }

    switch (xsregs) {
    case 7:
        decr_and_store(ctx, 30, t0);
        /* Fall through */
    case 6:
        decr_and_store(ctx, 23, t0);
        /* Fall through */
    case 5:
        decr_and_store(ctx, 22, t0);
        /* Fall through */
    case 4:
        decr_and_store(ctx, 21, t0);
        /* Fall through */
    case 3:
        decr_and_store(ctx, 20, t0);
        /* Fall through */
    case 2:
        decr_and_store(ctx, 19, t0);
        /* Fall through */
    case 1:
        decr_and_store(ctx, 18, t0);
    }

    if (do_s1) {
        decr_and_store(ctx, 17, t0);
    }
    if (do_s0) {
        decr_and_store(ctx, 16, t0);
    }

    switch (aregs) {
    case 0:
    case 4:
    case 8:
    case 12:
    case 14:
        astatic = 0;
        break;
    case 1:
    case 5:
    case 9:
    case 13:
        astatic = 1;
        break;
    case 2:
    case 6:
    case 10:
        astatic = 2;
        break;
    case 3:
    case 7:
        astatic = 3;
        break;
    case 11:
        astatic = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    if (astatic > 0) {
        decr_and_store(ctx, 7, t0);
        if (astatic > 1) {
            decr_and_store(ctx, 6, t0);
            if (astatic > 2) {
                decr_and_store(ctx, 5, t0);
                if (astatic > 3) {
                    decr_and_store(ctx, 4, t0);
                }
            }
        }
    }

    gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], -framesize);
}

static void decr_and_load(DisasContext *ctx, unsigned regidx, TCGv t0)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    tcg_gen_movi_tl(t2, -4);
    gen_op_addr_add(ctx, t0, t0, t2);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
                       ctx->default_tcg_memop_mask);
    gen_store_gpr(t1, regidx);
}

static void gen_mips16_restore(DisasContext *ctx,
                               int xsregs, int aregs,
                               int do_ra, int do_s0, int do_s1,
                               int framesize)
{
    int astatic;
    TCGv t0 = tcg_temp_new();

    gen_op_addr_addi(ctx, t0, cpu_gpr[29], framesize);

    if (do_ra) {
        decr_and_load(ctx, 31, t0);
    }

    switch (xsregs) {
    case 7:
        decr_and_load(ctx, 30, t0);
        /* Fall through */
    case 6:
        decr_and_load(ctx, 23, t0);
        /* Fall through */
    case 5:
        decr_and_load(ctx, 22, t0);
        /* Fall through */
    case 4:
        decr_and_load(ctx, 21, t0);
        /* Fall through */
    case 3:
        decr_and_load(ctx, 20, t0);
        /* Fall through */
    case 2:
        decr_and_load(ctx, 19, t0);
        /* Fall through */
    case 1:
        decr_and_load(ctx, 18, t0);
    }

    if (do_s1) {
        decr_and_load(ctx, 17, t0);
    }
    if (do_s0) {
        decr_and_load(ctx, 16, t0);
    }

    switch (aregs) {
    case 0:
    case 4:
    case 8:
    case 12:
    case 14:
        astatic = 0;
        break;
    case 1:
    case 5:
    case 9:
    case 13:
        astatic = 1;
        break;
    case 2:
    case 6:
    case 10:
        astatic = 2;
        break;
    case 3:
    case 7:
        astatic = 3;
        break;
    case 11:
        astatic = 4;
        break;
    default:
        gen_reserved_instruction(ctx);
        return;
    }

    if (astatic > 0) {
        decr_and_load(ctx, 7, t0);
        if (astatic > 1) {
            decr_and_load(ctx, 6, t0);
            if (astatic > 2) {
                decr_and_load(ctx, 5, t0);
                if (astatic > 3) {
                    decr_and_load(ctx, 4, t0);
                }
            }
        }
    }

    gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], framesize);
}

#if defined(TARGET_MIPS64)
static void decode_i64_mips16(DisasContext *ctx,
                              int ry, int funct, int16_t offset,
                              int extended)
{
    switch (funct) {
    case I64_LDSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 3;
        gen_ld(ctx, OPC_LD, ry, 29, offset);
        break;
    case I64_SDSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 3;
        gen_st(ctx, OPC_SD, ry, 29, offset);
        break;
    case I64_SDRASP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : (ctx->opcode & 0xff) << 3;
        gen_st(ctx, OPC_SD, 31, 29, offset);
        break;
    case I64_DADJSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : ((int8_t)ctx->opcode) << 3;
        gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset);
        break;
    case I64_LDPC:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
            gen_reserved_instruction(ctx);
        } else {
            offset = extended ? offset : offset << 3;
            gen_ld(ctx, OPC_LDPC, ry, 0, offset);
        }
        break;
    case I64_DADDIU5:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : ((int8_t)(offset << 3)) >> 3;
        gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset);
        break;
    case I64_DADDIUPC:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 2;
        gen_addiupc(ctx, ry, offset, 1, extended);
        break;
    case I64_DADDIUSP:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        offset = extended ? offset : offset << 2;
        gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset);
        break;
    }
}
#endif

static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx)
{
    int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
    int op, rx, ry, funct, sa;
    int16_t imm, offset;

    ctx->opcode = (ctx->opcode << 16) | extend;
    op = (ctx->opcode >> 11) & 0x1f;
    sa = (ctx->opcode >> 22) & 0x1f;
    funct = (ctx->opcode >> 8) & 0x7;
    rx = xlat((ctx->opcode >> 8) & 0x7);
    ry = xlat((ctx->opcode >> 5) & 0x7);
    offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
                              | ((ctx->opcode >> 21) & 0x3f) << 5
                              | (ctx->opcode & 0x1f));

    /*
     * The extended opcodes cleverly reuse the opcodes from their 16-bit
     * counterparts.
     */
    switch (op) {
    case M16_OPC_ADDIUSP:
        gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
        break;
    case M16_OPC_ADDIUPC:
        gen_addiupc(ctx, rx, imm, 0, 1);
        break;
    case M16_OPC_B:
        gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BEQZ:
        gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BNEQZ:
        gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_SHIFT:
        switch (ctx->opcode & 0x3) {
        case 0x0:
            gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
            break;
        case 0x1:
#if defined(TARGET_MIPS64)
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
#else
            gen_reserved_instruction(ctx);
#endif
            break;
        case 0x2:
            gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
            break;
        case 0x3:
            gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
            break;
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LD, ry, rx, offset);
        break;
#endif
    case M16_OPC_RRIA:
        imm = ctx->opcode & 0xf;
        imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
        imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
        imm = (int16_t) (imm << 1) >> 1;
        if ((ctx->opcode >> 4) & 0x1) {
#if defined(TARGET_MIPS64)
            check_mips_64(ctx);
            gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
#else
            gen_reserved_instruction(ctx);
#endif
        } else {
            gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
        }
        break;
    case M16_OPC_ADDIU8:
        gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
        break;
    case M16_OPC_SLTI:
        gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
        break;
    case M16_OPC_SLTIU:
        gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
        break;
    case M16_OPC_I8:
        switch (funct) {
        case I8_BTEQZ:
            gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0);
            break;
        case I8_BTNEZ:
            gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0);
            break;
        case I8_SWRASP:
            gen_st(ctx, OPC_SW, 31, 29, imm);
            break;
        case I8_ADJSP:
            gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm);
            break;
        case I8_SVRS:
            check_insn(ctx, ISA_MIPS_R1);
            {
                int xsregs = (ctx->opcode >> 24) & 0x7;
                int aregs = (ctx->opcode >> 16) & 0xf;
                int do_ra = (ctx->opcode >> 6) & 0x1;
                int do_s0 = (ctx->opcode >> 5) & 0x1;
                int do_s1 = (ctx->opcode >> 4) & 0x1;
                int framesize = (((ctx->opcode >> 20) & 0xf) << 4
                                 | (ctx->opcode & 0xf)) << 3;

                if (ctx->opcode & (1 << 7)) {
                    gen_mips16_save(ctx, xsregs, aregs,
                                    do_ra, do_s0, do_s1,
                                    framesize);
                } else {
                    gen_mips16_restore(ctx, xsregs, aregs,
                                       do_ra, do_s0, do_s1,
                                       framesize);
                }
            }
            break;
        default:
            gen_reserved_instruction(ctx);
            break;
        }
        break;
    case M16_OPC_LI:
        tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
        break;
    case M16_OPC_CMPI:
        tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_SD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_st(ctx, OPC_SD, ry, rx, offset);
        break;
#endif
    case M16_OPC_LB:
        gen_ld(ctx, OPC_LB, ry, rx, offset);
        break;
    case M16_OPC_LH:
        gen_ld(ctx, OPC_LH, ry, rx, offset);
        break;
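    /*
     * The extended load/store forms use the full sign-extended 16-bit
     * immediate decoded above; unlike the 16-bit encodings handled in
     * decode_ase_mips16e(), the offset is not scaled by the access size.
     */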
    case M16_OPC_LWSP:
        gen_ld(ctx, OPC_LW, rx, 29, offset);
        break;
    case M16_OPC_LW:
        gen_ld(ctx, OPC_LW, ry, rx, offset);
        break;
    case M16_OPC_LBU:
        gen_ld(ctx, OPC_LBU, ry, rx, offset);
        break;
    case M16_OPC_LHU:
        gen_ld(ctx, OPC_LHU, ry, rx, offset);
        break;
    case M16_OPC_LWPC:
        gen_ld(ctx, OPC_LWPC, rx, 0, offset);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LWU:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LWU, ry, rx, offset);
        break;
#endif
    case M16_OPC_SB:
        gen_st(ctx, OPC_SB, ry, rx, offset);
        break;
    case M16_OPC_SH:
        gen_st(ctx, OPC_SH, ry, rx, offset);
        break;
    case M16_OPC_SWSP:
        gen_st(ctx, OPC_SW, rx, 29, offset);
        break;
    case M16_OPC_SW:
        gen_st(ctx, OPC_SW, ry, rx, offset);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_I64:
        decode_i64_mips16(ctx, ry, funct, offset, 1);
        break;
#endif
    default:
        gen_reserved_instruction(ctx);
        break;
    }

    return 4;
}

static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx)
{
    int rx, ry;
    int sa;
    int op, cnvt_op, op1, offset;
    int funct;
    int n_bytes;

    op = (ctx->opcode >> 11) & 0x1f;
    sa = (ctx->opcode >> 2) & 0x7;
    sa = sa == 0 ? 8 : sa;
    rx = xlat((ctx->opcode >> 8) & 0x7);
    cnvt_op = (ctx->opcode >> 5) & 0x7;
    ry = xlat((ctx->opcode >> 5) & 0x7);
    op1 = offset = ctx->opcode & 0x1f;

    n_bytes = 2;

    switch (op) {
    case M16_OPC_ADDIUSP:
        {
            int16_t imm = ((uint8_t) ctx->opcode) << 2;

            gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
        }
        break;
    case M16_OPC_ADDIUPC:
        gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0);
        break;
    case M16_OPC_B:
        offset = (ctx->opcode & 0x7ff) << 1;
        offset = (int16_t)(offset << 4) >> 4;
        gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_JAL:
        offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
        offset = (((ctx->opcode & 0x1f) << 21)
                  | ((ctx->opcode >> 5) & 0x1f) << 16
                  | offset) << 2;
        op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
        gen_compute_branch(ctx, op, 4, rx, ry, offset, 2);
        n_bytes = 4;
        break;
    case M16_OPC_BEQZ:
        gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0,
                           ((int8_t)ctx->opcode) << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_BNEQZ:
        gen_compute_branch(ctx, OPC_BNE, 2, rx, 0,
                           ((int8_t)ctx->opcode) << 1, 0);
        /* No delay slot, so just process as a normal instruction */
        break;
    case M16_OPC_SHIFT:
        switch (ctx->opcode & 0x3) {
        case 0x0:
            gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
            break;
        case 0x1:
#if defined(TARGET_MIPS64)
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
#else
            gen_reserved_instruction(ctx);
#endif
            break;
        case 0x2:
            gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
            break;
        case 0x3:
            gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
            break;
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LD, ry, rx, offset << 3);
        break;
#endif
    case M16_OPC_RRIA:
        {
            int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;

            if ((ctx->opcode >> 4) & 1) {
#if defined(TARGET_MIPS64)
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
#else
                gen_reserved_instruction(ctx);
#endif
            } else {
                gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
            }
        }
        break;
    case M16_OPC_ADDIU8:
        {
            int16_t imm = (int8_t) ctx->opcode;

            gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
        }
        break;
    case M16_OPC_SLTI:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
        }
        break;
    case M16_OPC_SLTIU:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
        }
        break;
    case M16_OPC_I8:
        {
            int reg32;

            funct = (ctx->opcode >> 8) & 0x7;
            switch (funct) {
            case I8_BTEQZ:
                gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
                                   ((int8_t)ctx->opcode) << 1, 0);
                break;
            case I8_BTNEZ:
                gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
                                   ((int8_t)ctx->opcode) << 1, 0);
                break;
            case I8_SWRASP:
                gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
                break;
            case I8_ADJSP:
                gen_arith_imm(ctx, OPC_ADDIU, 29, 29,
                              ((int8_t)ctx->opcode) << 3);
                break;
            case I8_SVRS:
                check_insn(ctx, ISA_MIPS_R1);
                {
                    int do_ra = ctx->opcode & (1 << 6);
                    int do_s0 = ctx->opcode & (1 << 5);
                    int do_s1 = ctx->opcode & (1 << 4);
                    int framesize = ctx->opcode & 0xf;

                    if (framesize == 0) {
                        framesize = 128;
                    } else {
                        framesize = framesize << 3;
                    }

                    if (ctx->opcode & (1 << 7)) {
                        gen_mips16_save(ctx, 0, 0,
                                        do_ra, do_s0, do_s1, framesize);
                    } else {
                        gen_mips16_restore(ctx, 0, 0,
                                           do_ra, do_s0, do_s1, framesize);
                    }
                }
                break;
            case I8_MOV32R:
                {
                    int rz = xlat(ctx->opcode & 0x7);

                    reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
                            ((ctx->opcode >> 5) & 0x7);
                    gen_arith(ctx, OPC_ADDU, reg32, rz, 0);
                }
                break;
            case I8_MOVR32:
                reg32 = ctx->opcode & 0x1f;
                gen_arith(ctx, OPC_ADDU, ry, reg32, 0);
                break;
            default:
                gen_reserved_instruction(ctx);
                break;
            }
        }
        break;
    case M16_OPC_LI:
        {
            int16_t imm = (uint8_t) ctx->opcode;

            gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm);
        }
        break;
    case M16_OPC_CMPI:
        {
            int16_t imm = (uint8_t) ctx->opcode;
            gen_logic_imm(ctx, OPC_XORI, 24, rx, imm);
        }
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_SD:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_st(ctx, OPC_SD, ry, rx, offset << 3);
        break;
#endif
    case M16_OPC_LB:
        gen_ld(ctx, OPC_LB, ry, rx, offset);
        break;
    case M16_OPC_LH:
        gen_ld(ctx, OPC_LH, ry, rx, offset << 1);
        break;
    case M16_OPC_LWSP:
        gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
        break;
    case M16_OPC_LW:
        gen_ld(ctx, OPC_LW, ry, rx, offset << 2);
        break;
    case M16_OPC_LBU:
        gen_ld(ctx, OPC_LBU, ry, rx, offset);
        break;
    case M16_OPC_LHU:
        gen_ld(ctx, OPC_LHU, ry, rx, offset << 1);
        break;
    case M16_OPC_LWPC:
        gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_LWU:
        check_insn(ctx, ISA_MIPS3);
        check_mips_64(ctx);
        gen_ld(ctx, OPC_LWU, ry, rx, offset << 2);
        break;
#endif
    case M16_OPC_SB:
        gen_st(ctx, OPC_SB, ry, rx, offset);
        break;
    case M16_OPC_SH:
        gen_st(ctx, OPC_SH, ry, rx, offset << 1);
        break;
    case M16_OPC_SWSP:
        gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
        break;
    case M16_OPC_SW:
        gen_st(ctx, OPC_SW, ry, rx, offset << 2);
        break;
    case M16_OPC_RRR:
        {
            int rz = xlat((ctx->opcode >> 2) & 0x7);
            int mips32_op;

            switch (ctx->opcode & 0x3) {
            case RRR_ADDU:
                mips32_op = OPC_ADDU;
                break;
            case RRR_SUBU:
                mips32_op = OPC_SUBU;
                break;
#if defined(TARGET_MIPS64)
            case RRR_DADDU:
                mips32_op = OPC_DADDU;
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                break;
            case RRR_DSUBU:
                mips32_op = OPC_DSUBU;
                check_insn(ctx, ISA_MIPS3);
                check_mips_64(ctx);
                break;
#endif
            default:
                gen_reserved_instruction(ctx);
                goto done;
            }

            gen_arith(ctx, mips32_op, rz, rx, ry);
        done:
            ;
        }
        break;
    case M16_OPC_RR:
        switch (op1) {
        case RR_JR:
            {
                int nd = (ctx->opcode >> 7) & 0x1;
                int link = (ctx->opcode >> 6) & 0x1;
                int ra = (ctx->opcode >> 5) & 0x1;

                if (nd) {
                    check_insn(ctx, ISA_MIPS_R1);
                }

                if (link) {
                    op = OPC_JALR;
                } else {
                    op = OPC_JR;
                }

                gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0,
                                   (nd ? 0 : 2));
            }
            break;
        case RR_SDBBP:
            if (is_uhi(ctx, extract32(ctx->opcode, 5, 6))) {
                ctx->base.is_jmp = DISAS_SEMIHOST;
            } else {
                /*
                 * XXX: not clear which exception should be raised
                 *      when in debug mode...
                 */
                check_insn(ctx, ISA_MIPS_R1);
                generate_exception_end(ctx, EXCP_DBp);
            }
            break;
        case RR_SLT:
            gen_slt(ctx, OPC_SLT, 24, rx, ry);
            break;
        case RR_SLTU:
            gen_slt(ctx, OPC_SLTU, 24, rx, ry);
            break;
        case RR_BREAK:
            generate_exception_break(ctx, extract32(ctx->opcode, 5, 6));
            break;
        case RR_SLLV:
            gen_shift(ctx, OPC_SLLV, ry, rx, ry);
            break;
        case RR_SRLV:
            gen_shift(ctx, OPC_SRLV, ry, rx, ry);
            break;
        case RR_SRAV:
            gen_shift(ctx, OPC_SRAV, ry, rx, ry);
            break;
#if defined(TARGET_MIPS64)
        case RR_DSRL:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa);
            break;
#endif
        case RR_CMP:
            gen_logic(ctx, OPC_XOR, 24, rx, ry);
            break;
        case RR_NEG:
            gen_arith(ctx, OPC_SUBU, rx, 0, ry);
            break;
        case RR_AND:
            gen_logic(ctx, OPC_AND, rx, rx, ry);
            break;
        case RR_OR:
            gen_logic(ctx, OPC_OR, rx, rx, ry);
            break;
        case RR_XOR:
            gen_logic(ctx, OPC_XOR, rx, rx, ry);
            break;
        case RR_NOT:
            gen_logic(ctx, OPC_NOR, rx, ry, 0);
            break;
        case RR_MFHI:
            gen_HILO(ctx, OPC_MFHI, 0, rx);
            break;
        case RR_CNVT:
            check_insn(ctx, ISA_MIPS_R1);
            switch (cnvt_op) {
            case RR_RY_CNVT_ZEB:
                tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_ZEH:
                tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEB:
                tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEH:
                tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
#if defined(TARGET_MIPS64)
            case RR_RY_CNVT_ZEW:
                check_insn(ctx, ISA_MIPS_R1);
                check_mips_64(ctx);
                tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
            case RR_RY_CNVT_SEW:
                check_insn(ctx, ISA_MIPS_R1);
                check_mips_64(ctx);
                tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
                break;
#endif
            default:
                gen_reserved_instruction(ctx);
                break;
            }
            break;
        case RR_MFLO:
            gen_HILO(ctx, OPC_MFLO, 0, rx);
            break;
#if defined(TARGET_MIPS64)
        case RR_DSRA:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa);
            break;
        case RR_DSLLV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSLLV, ry, rx, ry);
            break;
        case RR_DSRLV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSRLV, ry, rx, ry);
            break;
        case RR_DSRAV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_shift(ctx, OPC_DSRAV, ry, rx, ry);
            break;
#endif
        case RR_MULT:
            gen_muldiv(ctx, OPC_MULT, 0, rx, ry);
            break;
        case RR_MULTU:
            gen_muldiv(ctx, OPC_MULTU, 0, rx, ry);
            break;
        case RR_DIV:
            gen_muldiv(ctx, OPC_DIV, 0, rx, ry);
            break;
        case RR_DIVU:
            gen_muldiv(ctx, OPC_DIVU, 0, rx, ry);
            break;
#if defined(TARGET_MIPS64)
        case RR_DMULT:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DMULT, 0, rx, ry);
            break;
        case RR_DMULTU:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry);
            break;
        case RR_DDIV:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DDIV, 0, rx, ry);
            break;
        case RR_DDIVU:
            check_insn(ctx, ISA_MIPS3);
            check_mips_64(ctx);
            gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry);
            break;
#endif
        default:
            gen_reserved_instruction(ctx);
            break;
        }
        break;
    case M16_OPC_EXTEND:
        decode_extended_mips16_opc(env, ctx);
        n_bytes = 4;
        break;
#if defined(TARGET_MIPS64)
    case M16_OPC_I64:
        funct = (ctx->opcode >> 8) & 0x7;
        decode_i64_mips16(ctx, ry, funct, offset, 0);
        break;
#endif
    default:
        gen_reserved_instruction(ctx);
        break;
    }

    return n_bytes;
}