/*
 * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "iclass.h"
#include "attribs.h"
#include "genptr.h"
#include "decode.h"
#include "insn.h"
#include "printinsn.h"
#include "mmvec/decode_ext_mmvec.h"

#define fZXTN(N, M, VAL) ((VAL) & ((1LL << (N)) - 1))

enum {
    EXT_IDX_noext = 0,
    EXT_IDX_noext_AFTER = 4,
    EXT_IDX_mmvec = 4,
    EXT_IDX_mmvec_AFTER = 8,
    XX_LAST_EXT_IDX
};

/*
 * Certain operand types represent a non-contiguous set of values.
 * For example, the compound compare-and-jump instruction can only access
 * registers R0-R7 and R16-23.
 * This table represents the mapping from the encoding to the actual values.
 */

#define DEF_REGMAP(NAME, ELEMENTS, ...) \
    static const unsigned int DECODE_REGISTER_##NAME[ELEMENTS] = \
    { __VA_ARGS__ };

/*         Name  Num  Table */
DEF_REGMAP(R_16, 16,  0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
DEF_REGMAP(R__8, 8,   0, 2, 4, 6, 16, 18, 20, 22)
DEF_REGMAP(R_8,  8,   0, 1, 2, 3, 4, 5, 6, 7)

#define DECODE_MAPPED_REG(OPNUM, NAME) \
    insn->regno[OPNUM] = DECODE_REGISTER_##NAME[insn->regno[OPNUM]];

/* Helper functions for decode_*_generated.c.inc */
#define DECODE_MAPPED(NAME) \
static int decode_mapped_reg_##NAME(DisasContext *ctx, int x) \
{ \
    return DECODE_REGISTER_##NAME[x]; \
}
DECODE_MAPPED(R_16)
DECODE_MAPPED(R_8)
DECODE_MAPPED(R__8)

/* Helper function for decodetree_trans_funcs_generated.c.inc */
static int shift_left(DisasContext *ctx, int x, int n, int immno)
{
    int ret = x;
    Insn *insn = ctx->insn;
    if (!insn->extension_valid ||
        insn->which_extended != immno) {
        ret <<= n;
    }
    return ret;
}

/* Include the generated decoder for 32 bit insn */
#include "decode_normal_generated.c.inc"
#include "decode_hvx_generated.c.inc"

/* Include the generated decoder for 16 bit insn */
#include "decode_subinsn_a_generated.c.inc"
#include "decode_subinsn_l1_generated.c.inc"
#include "decode_subinsn_l2_generated.c.inc"
#include "decode_subinsn_s1_generated.c.inc"
#include "decode_subinsn_s2_generated.c.inc"

/* Include the generated helpers for the decoder */
#include "decodetree_trans_funcs_generated.c.inc"

void decode_send_insn_to(Packet *packet, int start, int newloc)
{
    Insn tmpinsn;
    int direction;
    int i;
    if (start == newloc) {
        return;
    }
    if (start < newloc) {
        /* Move towards end */
        direction = 1;
    } else {
        /* move towards beginning */
        direction = -1;
    }
    for (i = start; i != newloc; i += direction) {
        tmpinsn = packet->insn[i];
        packet->insn[i] = packet->insn[i + direction];
        packet->insn[i + direction] = tmpinsn;
    }
}
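
/*
 * For illustration (not part of the decoder): decode_send_insn_to() rotates
 * one instruction to a new position in the packet.  E.g., in a 4-insn
 * packet, decode_send_insn_to(pkt, 3, 0) moves insn[3] to index 0 and
 * shifts the former insn[0..2] up by one, preserving their relative order.
 */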

/* Fill newvalue registers with the correct regno */
static void
decode_fill_newvalue_regno(Packet *packet)
{
    int i, use_regidx, offset, def_idx, dst_idx;

    for (i = 1; i < packet->num_insns; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_DOTNEWVALUE) &&
            !GET_ATTRIB(packet->insn[i].opcode, A_EXTENSION)) {

            g_assert(packet->insn[i].new_read_idx != -1);
            use_regidx = packet->insn[i].new_read_idx;

            /*
             * What's encoded at the N-field is the offset to who's producing
             * the value.  Shift off the LSB which indicates odd/even register,
             * then walk backwards and skip over the constant extenders.
             */
            offset = packet->insn[i].regno[use_regidx] >> 1;
            def_idx = i - offset;
            for (int j = 0; j < offset; j++) {
                if (GET_ATTRIB(packet->insn[i - j - 1].opcode, A_IT_EXTENDER)) {
                    def_idx--;
                }
            }

            /*
             * Check for a badly encoded N-field which points to an instruction
             * out-of-range
             */
            g_assert(!((def_idx < 0) || (def_idx > (packet->num_insns - 1))));

            /* Now patch up the consumer with the register number */
            g_assert(packet->insn[def_idx].dest_idx != -1);
            dst_idx = packet->insn[def_idx].dest_idx;
            packet->insn[i].regno[use_regidx] =
                packet->insn[def_idx].regno[dst_idx];
            /*
             * We need to remember who produces this value to later
             * check if it was dynamically cancelled
             */
            packet->insn[i].new_value_producer_slot =
                packet->insn[def_idx].slot;
        }
    }
}

/* Split CJ into a compare and a jump */
static void decode_split_cmpjump(Packet *pkt)
{
    int last, i;
    int numinsns = pkt->num_insns;

    /*
     * First, split all compare-jumps.
     * The compare is sent to the end as a new instruction.
     * Do it this way so we don't reorder dual jumps. Those need to stay in
     * original order.
     */
    for (i = 0; i < numinsns; i++) {
        /* It's a cmp-jump */
        if (GET_ATTRIB(pkt->insn[i].opcode, A_NEWCMPJUMP)) {
            last = pkt->num_insns;
            pkt->insn[last] = pkt->insn[i];    /* copy the instruction */
            pkt->insn[last].part1 = true;      /* last insn does the CMP */
            pkt->insn[i].part1 = false;        /* existing insn does the JUMP */
            pkt->num_insns++;
        }
    }

    /* Now re-shuffle all the compares back to the beginning */
    for (i = 0; i < pkt->num_insns; i++) {
        if (pkt->insn[i].part1) {
            decode_send_insn_to(pkt, i, 0);
        }
    }
}

static bool decode_opcode_can_jump(int opcode)
{
    if ((GET_ATTRIB(opcode, A_JUMP)) ||
        (GET_ATTRIB(opcode, A_CALL)) ||
        (opcode == J2_trap0) ||
        (opcode == J2_pause)) {
        /* Exception to A_JUMP attribute */
        if (opcode == J4_hintjumpr) {
            return false;
        }
        return true;
    }

    return false;
}

static bool decode_opcode_ends_loop(int opcode)
{
    return GET_ATTRIB(opcode, A_HWLOOP0_END) ||
           GET_ATTRIB(opcode, A_HWLOOP1_END);
}

/* Set the is_* fields in each instruction */
static void decode_set_insn_attr_fields(Packet *pkt)
{
    int i;
    int numinsns = pkt->num_insns;
    uint16_t opcode;

    pkt->pkt_has_cof = false;
    pkt->pkt_has_multi_cof = false;
    pkt->pkt_has_endloop = false;
    pkt->pkt_has_dczeroa = false;

    for (i = 0; i < numinsns; i++) {
        opcode = pkt->insn[i].opcode;
        if (pkt->insn[i].part1) {
            continue;    /* Skip compare of cmp-jumps */
        }

        if (GET_ATTRIB(opcode, A_DCZEROA)) {
            pkt->pkt_has_dczeroa = true;
        }

        if (GET_ATTRIB(opcode, A_STORE)) {
            if (GET_ATTRIB(opcode, A_SCALAR_STORE) &&
                !GET_ATTRIB(opcode, A_MEMSIZE_0B)) {
                if (pkt->insn[i].slot == 0) {
                    pkt->pkt_has_store_s0 = true;
                } else {
                    pkt->pkt_has_store_s1 = true;
                }
            }
        }

        if (decode_opcode_can_jump(opcode)) {
            if (pkt->pkt_has_cof) {
                pkt->pkt_has_multi_cof = true;
            }
            pkt->pkt_has_cof = true;
        }

        pkt->insn[i].is_endloop = decode_opcode_ends_loop(opcode);

        pkt->pkt_has_endloop |= pkt->insn[i].is_endloop;

        if (pkt->pkt_has_endloop) {
            if (pkt->pkt_has_cof) {
                pkt->pkt_has_multi_cof = true;
            }
            pkt->pkt_has_cof = true;
        }
    }
}

/*
 * Shuffle for execution
 * Move stores to end (in same order as encoding)
 * Move compares to beginning (for use by .new insns)
 */
static void decode_shuffle_for_execution(Packet *packet)
{
    bool changed = false;
    int i;
    bool flag;    /* flag means we've seen a non-memory instruction */
    int n_mems;
    int last_insn = packet->num_insns - 1;

    /*
     * Skip end loops, somehow an end loop is getting in and messing
     * up the order
     */
    if (decode_opcode_ends_loop(packet->insn[last_insn].opcode)) {
        last_insn--;
    }

    do {
        changed = false;
        /*
         * Stores go last, must not reorder.
         * Cannot shuffle stores past loads, either.
         * Iterate backwards.  If we see a non-memory instruction,
         * then a store, shuffle the store to the front.  Don't shuffle
         * stores wrt each other or a load.
         */
        for (flag = false, n_mems = 0, i = last_insn; i >= 0; i--) {
            int opcode = packet->insn[i].opcode;

            if (flag && GET_ATTRIB(opcode, A_STORE)) {
                decode_send_insn_to(packet, i, last_insn - n_mems);
                n_mems++;
                changed = true;
            } else if (GET_ATTRIB(opcode, A_STORE)) {
                n_mems++;
            } else if (GET_ATTRIB(opcode, A_LOAD)) {
                /*
                 * Don't set flag, since we don't want to shuffle a
                 * store past a load
                 */
                n_mems++;
            } else if (GET_ATTRIB(opcode, A_DOTNEWVALUE)) {
                /*
                 * Don't set flag, since we don't want to shuffle past
                 * a .new value
                 */
            } else {
                flag = true;
            }
        }

        if (changed) {
            continue;
        }
        /* Compares go first, may be reordered wrt each other */
        for (flag = false, i = 0; i < last_insn + 1; i++) {
            int opcode = packet->insn[i].opcode;

            if (packet->insn[i].has_pred_dest &&
                GET_ATTRIB(opcode, A_STORE) == 0) {
                /* This should be a compare (not a store conditional) */
                if (flag) {
                    decode_send_insn_to(packet, i, 0);
                    changed = true;
                    continue;
                }
            } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P3) &&
                       !decode_opcode_ends_loop(packet->insn[i].opcode)) {
                /*
                 * spNloop instruction
                 * Don't reorder endloops; they are not valid for .new uses,
                 * and we want to match HW
                 */
                if (flag) {
                    decode_send_insn_to(packet, i, 0);
                    changed = true;
                    continue;
                }
            } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P0) &&
                       !GET_ATTRIB(opcode, A_NEWCMPJUMP)) {
                if (flag) {
                    decode_send_insn_to(packet, i, 0);
                    changed = true;
                    continue;
                }
            } else {
                flag = true;
            }
        }
        if (changed) {
            continue;
        }
    } while (changed);

    /*
     * If we have a .new register compare/branch, move that to the very
     * very end, past stores
     */
    for (i = 0; i < last_insn; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_DOTNEWVALUE)) {
            decode_send_insn_to(packet, i, last_insn);
            break;
        }
    }
}

static void
apply_extender(Packet *pkt, int i, uint32_t extender)
{
    int immed_num;
    uint32_t base_immed;

    immed_num = pkt->insn[i].which_extended;
    base_immed = pkt->insn[i].immed[immed_num];

    pkt->insn[i].immed[immed_num] = extender | fZXTN(6, 32, base_immed);
}

static void decode_apply_extenders(Packet *packet)
{
    int i;
    for (i = 0; i < packet->num_insns; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_IT_EXTENDER)) {
            packet->insn[i + 1].extension_valid = true;
            apply_extender(packet, i + 1, packet->insn[i].immed[0]);
        }
    }
}

static void decode_remove_extenders(Packet *packet)
{
    int i, j;
    for (i = 0; i < packet->num_insns; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_IT_EXTENDER)) {
            /* Remove this one by moving the remaining instructions down */
            for (j = i;
                 (j < packet->num_insns - 1) && (j < INSTRUCTIONS_MAX - 1);
                 j++) {
                packet->insn[j] = packet->insn[j + 1];
            }
            packet->num_insns--;
        }
    }
}

static SlotMask get_valid_slots(const Packet *pkt, unsigned int slot)
{
    if (GET_ATTRIB(pkt->insn[slot].opcode, A_EXTENSION)) {
        return mmvec_ext_decode_find_iclass_slots(pkt->insn[slot].opcode);
    } else {
        return find_iclass_slots(pkt->insn[slot].opcode,
                                 pkt->insn[slot].iclass);
    }
}

/*
 * Section 10.3 of the Hexagon V73 Programmer's Reference Manual
 *
 * A duplex is encoded as a 32-bit instruction with bits [15:14] set to 00.
 * The sub-instructions that comprise a duplex are encoded as 13-bit fields
 * in the duplex.
 *
 * Per table 10-4, the 4-bit duplex iclass is encoded in bits 31:29, 13
 */
static uint32_t get_duplex_iclass(uint32_t encoding)
{
    uint32_t iclass = extract32(encoding, 13, 1);
    iclass = deposit32(iclass, 1, 3, extract32(encoding, 29, 3));
    return iclass;
}

/*
 * Per table 10-5, the duplex ICLASS field values that specify the group of
 * each sub-instruction in a duplex
 *
 * This table points to the decode instruction for each entry in the table
 */
typedef bool (*subinsn_decode_func)(DisasContext *ctx, uint16_t insn);
typedef struct {
    subinsn_decode_func decode_slot0_subinsn;
    subinsn_decode_func decode_slot1_subinsn;
} subinsn_decode_groups;

static const subinsn_decode_groups decode_groups[16] = {
    [0x0] = { decode_subinsn_l1, decode_subinsn_l1 },
    [0x1] = { decode_subinsn_l2, decode_subinsn_l1 },
    [0x2] = { decode_subinsn_l2, decode_subinsn_l2 },
    [0x3] = { decode_subinsn_a,  decode_subinsn_a },
    [0x4] = { decode_subinsn_l1, decode_subinsn_a },
    [0x5] = { decode_subinsn_l2, decode_subinsn_a },
    [0x6] = { decode_subinsn_s1, decode_subinsn_a },
    [0x7] = { decode_subinsn_s2, decode_subinsn_a },
    [0x8] = { decode_subinsn_s1, decode_subinsn_l1 },
    [0x9] = { decode_subinsn_s1, decode_subinsn_l2 },
    [0xa] = { decode_subinsn_s1, decode_subinsn_s1 },
    [0xb] = { decode_subinsn_s2, decode_subinsn_s1 },
    [0xc] = { decode_subinsn_s2, decode_subinsn_l1 },
    [0xd] = { decode_subinsn_s2, decode_subinsn_l2 },
    [0xe] = { decode_subinsn_s2, decode_subinsn_s2 },
    [0xf] = { NULL,              NULL },              /* Reserved */
};

static uint16_t get_slot0_subinsn(uint32_t encoding)
{
    return extract32(encoding, 0, 13);
}

static uint16_t get_slot1_subinsn(uint32_t encoding)
{
    return extract32(encoding, 16, 13);
}

static unsigned int
decode_insns(DisasContext *ctx, Insn *insn, uint32_t encoding)
{
    if (parse_bits(encoding) != 0) {
        if (decode_normal(ctx, encoding) ||
            decode_hvx(ctx, encoding)) {
            insn->generate = opcode_genptr[insn->opcode];
            insn->iclass = iclass_bits(encoding);
            return 1;
        }
        g_assert_not_reached();
    } else {
        uint32_t iclass = get_duplex_iclass(encoding);
        unsigned int slot0_subinsn = get_slot0_subinsn(encoding);
        unsigned int slot1_subinsn = get_slot1_subinsn(encoding);
        subinsn_decode_func decode_slot0_subinsn =
            decode_groups[iclass].decode_slot0_subinsn;
        subinsn_decode_func decode_slot1_subinsn =
            decode_groups[iclass].decode_slot1_subinsn;

        /* The slot1 subinsn needs to be in the packet first */
        if (decode_slot1_subinsn(ctx, slot1_subinsn)) {
            insn->generate = opcode_genptr[insn->opcode];
            insn->iclass = iclass_bits(encoding);
            ctx->insn = ++insn;
            if (decode_slot0_subinsn(ctx, slot0_subinsn)) {
                insn->generate = opcode_genptr[insn->opcode];
                insn->iclass = iclass_bits(encoding);
                return 2;
            }
        }
        g_assert_not_reached();
    }
}
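
/*
 * Worked example (illustration only, not used by the decoder): for a duplex
 * encoding with bits [31:29] = 0b011 and bit 13 = 1, get_duplex_iclass()
 * returns 0b0111 = 0x7, so decode_groups[0x7] decodes the slot 0 field
 * (bits [12:0]) as an S2-type subinsn and the slot 1 field (bits [28:16])
 * as an A-type subinsn.
 */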

static void decode_add_endloop_insn(Insn *insn, int loopnum)
{
    if (loopnum == 10) {
        insn->opcode = J2_endloop01;
        insn->generate = opcode_genptr[J2_endloop01];
    } else if (loopnum == 1) {
        insn->opcode = J2_endloop1;
        insn->generate = opcode_genptr[J2_endloop1];
    } else if (loopnum == 0) {
        insn->opcode = J2_endloop0;
        insn->generate = opcode_genptr[J2_endloop0];
    } else {
        g_assert_not_reached();
    }
}

static bool decode_parsebits_is_loopend(uint32_t encoding32)
{
    uint32_t bits = parse_bits(encoding32);
    return bits == 0x2;
}

static bool has_valid_slot_assignment(Packet *pkt)
{
    int used_slots = 0;
    for (int i = 0; i < pkt->num_insns; i++) {
        int slot_mask;
        Insn *insn = &pkt->insn[i];
        if (decode_opcode_ends_loop(insn->opcode)) {
            /* We overload slot 0 for endloop. */
            continue;
        }
        slot_mask = 1 << insn->slot;
        if (used_slots & slot_mask) {
            return false;
        }
        used_slots |= slot_mask;
    }
    return true;
}
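
/*
 * For illustration: the end of hardware loop 0 is marked by parse bits 0b10
 * on the first word of the packet, and the end of hardware loop 1 by parse
 * bits 0b10 on the second word (which requires a packet of at least three
 * words).  decode_packet() below passes loopnum values of 0, 1, or 10 (both
 * loops) to decode_add_endloop_insn() accordingly.
 */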

static bool
decode_set_slot_number(Packet *pkt)
{
    int slot;
    int i;
    bool hit_mem_insn = false;
    bool hit_duplex = false;
    bool slot0_found = false;
    bool slot1_found = false;
    int slot1_iidx = 0;

    /*
     * The slots are encoded in reverse order
     * For each instruction, count down until you find a suitable slot
     */
    for (i = 0, slot = 3; i < pkt->num_insns; i++) {
        SlotMask valid_slots = get_valid_slots(pkt, i);

        while (!(valid_slots & (1 << slot))) {
            slot--;
        }
        pkt->insn[i].slot = slot;
        if (slot) {
            /* I've assigned the slot, now decrement it for the next insn */
            slot--;
        }
    }

    /* Fix the exceptions - mem insns to slot 0,1 */
    for (i = pkt->num_insns - 1; i >= 0; i--) {
        /* First memory instruction always goes to slot 0 */
        if ((GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE) ||
             GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE_PACKET_RULES)) &&
            !hit_mem_insn) {
            hit_mem_insn = true;
            pkt->insn[i].slot = 0;
            continue;
        }

        /* Next memory instruction always goes to slot 1 */
        if ((GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE) ||
             GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE_PACKET_RULES)) &&
            hit_mem_insn) {
            pkt->insn[i].slot = 1;
        }
    }

    /* Fix the exceptions - duplex always slot 0,1 */
    for (i = pkt->num_insns - 1; i >= 0; i--) {
        /* First subinsn always goes to slot 0 */
        if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN) && !hit_duplex) {
            hit_duplex = true;
            pkt->insn[i].slot = 0;
            continue;
        }

        /* Next subinsn always goes to slot 1 */
        if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN) && hit_duplex) {
            pkt->insn[i].slot = 1;
        }
    }

    /* Fix the exceptions - slot 1 is never empty, always aligns to slot 0 */
    for (i = pkt->num_insns - 1; i >= 0; i--) {
        /* Is slot0 used? */
        if (pkt->insn[i].slot == 0) {
            bool is_endloop = (pkt->insn[i].opcode == J2_endloop01);
            is_endloop |= (pkt->insn[i].opcode == J2_endloop0);
            is_endloop |= (pkt->insn[i].opcode == J2_endloop1);

            /*
             * Make sure it's not endloop, since we're overloading
             * slot0 for endloop
             */
            if (!is_endloop) {
                slot0_found = true;
            }
        }
        /* Is slot1 used? */
        if (pkt->insn[i].slot == 1) {
            slot1_found = true;
            slot1_iidx = i;
        }
    }
    /* Is slot0 empty and slot1 used? */
    if ((!slot0_found) && slot1_found) {
        /* Then push it to slot0 */
        pkt->insn[slot1_iidx].slot = 0;
    }

    return has_valid_slot_assignment(pkt);
}

/*
 * decode_packet
 * Decodes packet with given words
 * Returns 0 on insufficient words,
 * or number of words used on success
 */

int decode_packet(DisasContext *ctx, int max_words, const uint32_t *words,
                  Packet *pkt, bool disas_only)
{
    int num_insns = 0;
    int words_read = 0;
    bool end_of_packet = false;
    int new_insns = 0;
    int i;
    uint32_t encoding32;

    /* Initialize */
    memset(pkt, 0, sizeof(*pkt));
    /* Try to build packet */
    while (!end_of_packet && (words_read < max_words)) {
        Insn *insn = &pkt->insn[num_insns];
        ctx->insn = insn;
        encoding32 = words[words_read];
        end_of_packet = is_packet_end(encoding32);
        new_insns = decode_insns(ctx, insn, encoding32);
        g_assert(new_insns > 0);
        /*
         * If we saw an extender, mark next word extended so immediate
         * decode works
         */
        if (pkt->insn[num_insns].opcode == A4_ext) {
            pkt->insn[num_insns + 1].extension_valid = true;
        }
        num_insns += new_insns;
        words_read++;
    }

    pkt->num_insns = num_insns;
    if (!end_of_packet) {
        /* Ran out of words! */
        return 0;
    }
    pkt->encod_pkt_size_in_bytes = words_read * 4;
    pkt->pkt_has_hvx = false;
    for (i = 0; i < num_insns; i++) {
        pkt->pkt_has_hvx |=
            GET_ATTRIB(pkt->insn[i].opcode, A_CVI);
    }

    /*
     * Check for :endloop in the parse bits
     * Section 10.6 of the Programmer's Reference describes the encoding
     *     The end of hardware loop 0 can be encoded with 2 words
     *     The end of hardware loop 1 needs 3 words
     */
    if ((words_read == 2) && (decode_parsebits_is_loopend(words[0]))) {
        decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 0);
    }
    if (words_read >= 3) {
        bool has_loop0, has_loop1;
        has_loop0 = decode_parsebits_is_loopend(words[0]);
        has_loop1 = decode_parsebits_is_loopend(words[1]);
        if (has_loop0 && has_loop1) {
            decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 10);
        } else if (has_loop1) {
            decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 1);
        } else if (has_loop0) {
            decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 0);
        }
    }

    decode_apply_extenders(pkt);
    if (!disas_only) {
        decode_remove_extenders(pkt);
        if (!decode_set_slot_number(pkt)) {
            /* Invalid packet */
            return 0;
        }
    }
    decode_fill_newvalue_regno(pkt);

    if (pkt->pkt_has_hvx) {
        mmvec_ext_decode_checks(pkt, disas_only);
    }

    if (!disas_only) {
        decode_shuffle_for_execution(pkt);
        decode_split_cmpjump(pkt);
        decode_set_insn_attr_fields(pkt);
    }

    return words_read;
}

/* Used for "-d in_asm" logging */
int disassemble_hexagon(uint32_t *words, int nwords, bfd_vma pc,
                        GString *buf)
{
    DisasContext ctx;
    Packet pkt;

    memset(&ctx, 0, sizeof(DisasContext));
    ctx.pkt = &pkt;

    if (decode_packet(&ctx, nwords, words, &pkt, true) > 0) {
        snprint_a_pkt_disas(buf, &pkt, words, pc);
        return pkt.encod_pkt_size_in_bytes;
    } else {
        g_string_assign(buf, "<invalid>");
        return 0;
    }
}
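
/*
 * Usage note (illustration only): a return value of 0 from decode_packet()
 * means the words did not form a complete, valid packet -- either no packet
 * terminator was found within max_words, or (when decoding for execution)
 * the slot assignment was invalid.  A positive return value is the number of
 * 32-bit words consumed, as the check in disassemble_hexagon() above shows.
 */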