1 /* 2 * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, see <http://www.gnu.org/licenses/>. 16 */ 17 18 #include "qemu/osdep.h" 19 #include "cpu.h" 20 #include "internal.h" 21 #include "tcg/tcg-op.h" 22 #include "tcg/tcg-op-gvec.h" 23 #include "insn.h" 24 #include "opcodes.h" 25 #include "translate.h" 26 #define QEMU_GENERATE /* Used internally by macros.h */ 27 #include "macros.h" 28 #include "mmvec/macros.h" 29 #undef QEMU_GENERATE 30 #include "gen_tcg.h" 31 #include "gen_tcg_hvx.h" 32 #include "genptr.h" 33 34 TCGv gen_read_reg(TCGv result, int num) 35 { 36 tcg_gen_mov_tl(result, hex_gpr[num]); 37 return result; 38 } 39 40 TCGv gen_read_preg(TCGv pred, uint8_t num) 41 { 42 tcg_gen_mov_tl(pred, hex_pred[num]); 43 return pred; 44 } 45 46 #define IMMUTABLE (~0) 47 48 const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = { 49 [HEX_REG_USR] = 0xc13000c0, 50 [HEX_REG_PC] = IMMUTABLE, 51 [HEX_REG_GP] = 0x3f, 52 [HEX_REG_UPCYCLELO] = IMMUTABLE, 53 [HEX_REG_UPCYCLEHI] = IMMUTABLE, 54 [HEX_REG_UTIMERLO] = IMMUTABLE, 55 [HEX_REG_UTIMERHI] = IMMUTABLE, 56 }; 57 58 static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val, 59 target_ulong reg_mask) 60 { 61 if (reg_mask) { 62 TCGv tmp = tcg_temp_new(); 63 64 /* new_val = (new_val & ~reg_mask) | (cur_val & reg_mask) */ 65 tcg_gen_andi_tl(new_val, new_val, 
~reg_mask); 66 tcg_gen_andi_tl(tmp, cur_val, reg_mask); 67 tcg_gen_or_tl(new_val, new_val, tmp); 68 } 69 } 70 71 static TCGv get_result_gpr(DisasContext *ctx, int rnum) 72 { 73 if (ctx->need_commit) { 74 return hex_new_value[rnum]; 75 } else { 76 return hex_gpr[rnum]; 77 } 78 } 79 80 static TCGv_i64 get_result_gpr_pair(DisasContext *ctx, int rnum) 81 { 82 TCGv_i64 result = tcg_temp_new_i64(); 83 tcg_gen_concat_i32_i64(result, get_result_gpr(ctx, rnum), 84 get_result_gpr(ctx, rnum + 1)); 85 return result; 86 } 87 88 void gen_log_reg_write(DisasContext *ctx, int rnum, TCGv val) 89 { 90 const target_ulong reg_mask = reg_immut_masks[rnum]; 91 92 gen_masked_reg_write(val, hex_gpr[rnum], reg_mask); 93 tcg_gen_mov_tl(get_result_gpr(ctx, rnum), val); 94 if (HEX_DEBUG) { 95 /* Do this so HELPER(debug_commit_end) will know */ 96 tcg_gen_movi_tl(hex_reg_written[rnum], 1); 97 } 98 } 99 100 static void gen_log_reg_write_pair(DisasContext *ctx, int rnum, TCGv_i64 val) 101 { 102 TCGv val32 = tcg_temp_new(); 103 104 /* Low word */ 105 tcg_gen_extrl_i64_i32(val32, val); 106 gen_log_reg_write(ctx, rnum, val32); 107 108 /* High word */ 109 tcg_gen_extrh_i64_i32(val32, val); 110 gen_log_reg_write(ctx, rnum + 1, val32); 111 } 112 113 TCGv get_result_pred(DisasContext *ctx, int pnum) 114 { 115 if (ctx->need_commit) { 116 return hex_new_pred_value[pnum]; 117 } else { 118 return hex_pred[pnum]; 119 } 120 } 121 122 void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) 123 { 124 TCGv pred = get_result_pred(ctx, pnum); 125 TCGv base_val = tcg_temp_new(); 126 127 tcg_gen_andi_tl(base_val, val, 0xff); 128 129 /* 130 * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual 131 * 132 * Multiple writes to the same preg are and'ed together 133 * If this is the first predicate write in the packet, do a 134 * straight assignment. Otherwise, do an and. 
135 */ 136 if (!test_bit(pnum, ctx->pregs_written)) { 137 tcg_gen_mov_tl(pred, base_val); 138 } else { 139 tcg_gen_and_tl(pred, pred, base_val); 140 } 141 if (HEX_DEBUG) { 142 tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum); 143 } 144 set_bit(pnum, ctx->pregs_written); 145 } 146 147 static inline void gen_read_p3_0(TCGv control_reg) 148 { 149 tcg_gen_movi_tl(control_reg, 0); 150 for (int i = 0; i < NUM_PREGS; i++) { 151 tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8); 152 } 153 } 154 155 /* 156 * Certain control registers require special handling on read 157 * HEX_REG_P3_0_ALIASED aliased to the predicate registers 158 * -> concat the 4 predicate registers together 159 * HEX_REG_PC actual value stored in DisasContext 160 * -> assign from ctx->base.pc_next 161 * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext 162 * -> add current TB changes to existing reg value 163 */ 164 static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num, 165 TCGv dest) 166 { 167 if (reg_num == HEX_REG_P3_0_ALIASED) { 168 gen_read_p3_0(dest); 169 } else if (reg_num == HEX_REG_PC) { 170 tcg_gen_movi_tl(dest, ctx->base.pc_next); 171 } else if (reg_num == HEX_REG_QEMU_PKT_CNT) { 172 tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT], 173 ctx->num_packets); 174 } else if (reg_num == HEX_REG_QEMU_INSN_CNT) { 175 tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT], 176 ctx->num_insns); 177 } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { 178 tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT], 179 ctx->num_hvx_insns); 180 } else { 181 tcg_gen_mov_tl(dest, hex_gpr[reg_num]); 182 } 183 } 184 185 static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, 186 TCGv_i64 dest) 187 { 188 if (reg_num == HEX_REG_P3_0_ALIASED) { 189 TCGv p3_0 = tcg_temp_new(); 190 gen_read_p3_0(p3_0); 191 tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]); 192 } else if (reg_num == HEX_REG_PC - 1) { 193 TCGv pc = 
tcg_constant_tl(ctx->base.pc_next); 194 tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc); 195 } else if (reg_num == HEX_REG_QEMU_PKT_CNT) { 196 TCGv pkt_cnt = tcg_temp_new(); 197 TCGv insn_cnt = tcg_temp_new(); 198 tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT], 199 ctx->num_packets); 200 tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT], 201 ctx->num_insns); 202 tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt); 203 } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { 204 TCGv hvx_cnt = tcg_temp_new(); 205 tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT], 206 ctx->num_hvx_insns); 207 tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]); 208 } else { 209 tcg_gen_concat_i32_i64(dest, 210 hex_gpr[reg_num], 211 hex_gpr[reg_num + 1]); 212 } 213 } 214 215 static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg) 216 { 217 TCGv hex_p8 = tcg_temp_new(); 218 for (int i = 0; i < NUM_PREGS; i++) { 219 tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8); 220 gen_log_pred_write(ctx, i, hex_p8); 221 } 222 } 223 224 /* 225 * Certain control registers require special handling on write 226 * HEX_REG_P3_0_ALIASED aliased to the predicate registers 227 * -> break the value across 4 predicate registers 228 * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext 229 * -> clear the changes 230 */ 231 static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num, 232 TCGv val) 233 { 234 if (reg_num == HEX_REG_P3_0_ALIASED) { 235 gen_write_p3_0(ctx, val); 236 } else { 237 gen_log_reg_write(ctx, reg_num, val); 238 if (reg_num == HEX_REG_QEMU_PKT_CNT) { 239 ctx->num_packets = 0; 240 } 241 if (reg_num == HEX_REG_QEMU_INSN_CNT) { 242 ctx->num_insns = 0; 243 } 244 if (reg_num == HEX_REG_QEMU_HVX_CNT) { 245 ctx->num_hvx_insns = 0; 246 } 247 } 248 } 249 250 static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, 251 TCGv_i64 val) 252 { 253 if (reg_num == HEX_REG_P3_0_ALIASED) { 254 TCGv result = get_result_gpr(ctx, reg_num + 1); 255 TCGv 
val32 = tcg_temp_new(); 256 tcg_gen_extrl_i64_i32(val32, val); 257 gen_write_p3_0(ctx, val32); 258 tcg_gen_extrh_i64_i32(val32, val); 259 tcg_gen_mov_tl(result, val32); 260 } else { 261 gen_log_reg_write_pair(ctx, reg_num, val); 262 if (reg_num == HEX_REG_QEMU_PKT_CNT) { 263 ctx->num_packets = 0; 264 ctx->num_insns = 0; 265 } 266 if (reg_num == HEX_REG_QEMU_HVX_CNT) { 267 ctx->num_hvx_insns = 0; 268 } 269 } 270 } 271 272 TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign) 273 { 274 if (sign) { 275 tcg_gen_sextract_tl(result, src, N * 8, 8); 276 } else { 277 tcg_gen_extract_tl(result, src, N * 8, 8); 278 } 279 return result; 280 } 281 282 TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign) 283 { 284 TCGv_i64 res64 = tcg_temp_new_i64(); 285 if (sign) { 286 tcg_gen_sextract_i64(res64, src, N * 8, 8); 287 } else { 288 tcg_gen_extract_i64(res64, src, N * 8, 8); 289 } 290 tcg_gen_extrl_i64_i32(result, res64); 291 292 return result; 293 } 294 295 TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign) 296 { 297 if (sign) { 298 tcg_gen_sextract_tl(result, src, N * 16, 16); 299 } else { 300 tcg_gen_extract_tl(result, src, N * 16, 16); 301 } 302 return result; 303 } 304 305 void gen_set_half(int N, TCGv result, TCGv src) 306 { 307 tcg_gen_deposit_tl(result, result, src, N * 16, 16); 308 } 309 310 void gen_set_half_i64(int N, TCGv_i64 result, TCGv src) 311 { 312 TCGv_i64 src64 = tcg_temp_new_i64(); 313 tcg_gen_extu_i32_i64(src64, src); 314 tcg_gen_deposit_i64(result, result, src64, N * 16, 16); 315 } 316 317 void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) 318 { 319 TCGv_i64 src64 = tcg_temp_new_i64(); 320 tcg_gen_extu_i32_i64(src64, src); 321 tcg_gen_deposit_i64(result, result, src64, N * 8, 8); 322 } 323 324 static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index) 325 { 326 tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL); 327 tcg_gen_mov_tl(hex_llsc_addr, vaddr); 328 tcg_gen_mov_tl(hex_llsc_val, dest); 329 } 330 331 
static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index) 332 { 333 tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ); 334 tcg_gen_mov_tl(hex_llsc_addr, vaddr); 335 tcg_gen_mov_i64(hex_llsc_val_i64, dest); 336 } 337 338 static inline void gen_store_conditional4(DisasContext *ctx, 339 TCGv pred, TCGv vaddr, TCGv src) 340 { 341 TCGLabel *fail = gen_new_label(); 342 TCGLabel *done = gen_new_label(); 343 TCGv one, zero, tmp; 344 345 tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail); 346 347 one = tcg_constant_tl(0xff); 348 zero = tcg_constant_tl(0); 349 tmp = tcg_temp_new(); 350 tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src, 351 ctx->mem_idx, MO_32); 352 tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val, 353 one, zero); 354 tcg_gen_br(done); 355 356 gen_set_label(fail); 357 tcg_gen_movi_tl(pred, 0); 358 359 gen_set_label(done); 360 tcg_gen_movi_tl(hex_llsc_addr, ~0); 361 } 362 363 static inline void gen_store_conditional8(DisasContext *ctx, 364 TCGv pred, TCGv vaddr, TCGv_i64 src) 365 { 366 TCGLabel *fail = gen_new_label(); 367 TCGLabel *done = gen_new_label(); 368 TCGv_i64 one, zero, tmp; 369 370 tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail); 371 372 one = tcg_constant_i64(0xff); 373 zero = tcg_constant_i64(0); 374 tmp = tcg_temp_new_i64(); 375 tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src, 376 ctx->mem_idx, MO_64); 377 tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64, 378 one, zero); 379 tcg_gen_extrl_i64_i32(pred, tmp); 380 tcg_gen_br(done); 381 382 gen_set_label(fail); 383 tcg_gen_movi_tl(pred, 0); 384 385 gen_set_label(done); 386 tcg_gen_movi_tl(hex_llsc_addr, ~0); 387 } 388 389 void gen_store32(TCGv vaddr, TCGv src, int width, uint32_t slot) 390 { 391 tcg_gen_mov_tl(hex_store_addr[slot], vaddr); 392 tcg_gen_movi_tl(hex_store_width[slot], width); 393 tcg_gen_mov_tl(hex_store_val32[slot], src); 394 } 395 396 void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, 
uint32_t slot) 397 { 398 gen_store32(vaddr, src, 1, slot); 399 } 400 401 void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot) 402 { 403 TCGv tmp = tcg_constant_tl(src); 404 gen_store1(cpu_env, vaddr, tmp, slot); 405 } 406 407 void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot) 408 { 409 gen_store32(vaddr, src, 2, slot); 410 } 411 412 void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot) 413 { 414 TCGv tmp = tcg_constant_tl(src); 415 gen_store2(cpu_env, vaddr, tmp, slot); 416 } 417 418 void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot) 419 { 420 gen_store32(vaddr, src, 4, slot); 421 } 422 423 void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot) 424 { 425 TCGv tmp = tcg_constant_tl(src); 426 gen_store4(cpu_env, vaddr, tmp, slot); 427 } 428 429 void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, uint32_t slot) 430 { 431 tcg_gen_mov_tl(hex_store_addr[slot], vaddr); 432 tcg_gen_movi_tl(hex_store_width[slot], 8); 433 tcg_gen_mov_i64(hex_store_val64[slot], src); 434 } 435 436 void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, uint32_t slot) 437 { 438 TCGv_i64 tmp = tcg_constant_i64(src); 439 gen_store8(cpu_env, vaddr, tmp, slot); 440 } 441 442 TCGv gen_8bitsof(TCGv result, TCGv value) 443 { 444 TCGv zero = tcg_constant_tl(0); 445 TCGv ones = tcg_constant_tl(0xff); 446 tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero); 447 448 return result; 449 } 450 451 static void gen_write_new_pc_addr(DisasContext *ctx, TCGv addr, 452 TCGCond cond, TCGv pred) 453 { 454 TCGLabel *pred_false = NULL; 455 if (cond != TCG_COND_ALWAYS) { 456 pred_false = gen_new_label(); 457 tcg_gen_brcondi_tl(cond, pred, 0, pred_false); 458 } 459 460 if (ctx->pkt->pkt_has_multi_cof) { 461 /* If there are multiple branches in a packet, ignore the second one */ 462 tcg_gen_movcond_tl(TCG_COND_NE, hex_gpr[HEX_REG_PC], 463 hex_branch_taken, tcg_constant_tl(0), 464 
hex_gpr[HEX_REG_PC], addr); 465 tcg_gen_movi_tl(hex_branch_taken, 1); 466 } else { 467 tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], addr); 468 } 469 470 if (cond != TCG_COND_ALWAYS) { 471 gen_set_label(pred_false); 472 } 473 } 474 475 static void gen_write_new_pc_pcrel(DisasContext *ctx, int pc_off, 476 TCGCond cond, TCGv pred) 477 { 478 target_ulong dest = ctx->pkt->pc + pc_off; 479 if (ctx->pkt->pkt_has_multi_cof) { 480 gen_write_new_pc_addr(ctx, tcg_constant_tl(dest), cond, pred); 481 } else { 482 /* Defer this jump to the end of the TB */ 483 ctx->branch_cond = TCG_COND_ALWAYS; 484 if (pred != NULL) { 485 ctx->branch_cond = cond; 486 tcg_gen_mov_tl(hex_branch_taken, pred); 487 } 488 ctx->branch_dest = dest; 489 } 490 } 491 492 void gen_set_usr_field(DisasContext *ctx, int field, TCGv val) 493 { 494 TCGv usr = get_result_gpr(ctx, HEX_REG_USR); 495 tcg_gen_deposit_tl(usr, usr, val, 496 reg_field_info[field].offset, 497 reg_field_info[field].width); 498 } 499 500 void gen_set_usr_fieldi(DisasContext *ctx, int field, int x) 501 { 502 if (reg_field_info[field].width == 1) { 503 TCGv usr = get_result_gpr(ctx, HEX_REG_USR); 504 target_ulong bit = 1 << reg_field_info[field].offset; 505 if ((x & 1) == 1) { 506 tcg_gen_ori_tl(usr, usr, bit); 507 } else { 508 tcg_gen_andi_tl(usr, usr, ~bit); 509 } 510 } else { 511 TCGv val = tcg_constant_tl(x); 512 gen_set_usr_field(ctx, field, val); 513 } 514 } 515 516 static void gen_compare(TCGCond cond, TCGv res, TCGv arg1, TCGv arg2) 517 { 518 TCGv one = tcg_constant_tl(0xff); 519 TCGv zero = tcg_constant_tl(0); 520 521 tcg_gen_movcond_tl(cond, res, arg1, arg2, one, zero); 522 } 523 524 #ifndef CONFIG_HEXAGON_IDEF_PARSER 525 static inline void gen_loop0r(DisasContext *ctx, TCGv RsV, int riV) 526 { 527 fIMMEXT(riV); 528 fPCALIGN(riV); 529 gen_log_reg_write(ctx, HEX_REG_LC0, RsV); 530 gen_log_reg_write(ctx, HEX_REG_SA0, tcg_constant_tl(ctx->pkt->pc + riV)); 531 gen_set_usr_fieldi(ctx, USR_LPCFG, 0); 532 } 533 534 static void 
gen_loop0i(DisasContext *ctx, int count, int riV) 535 { 536 gen_loop0r(ctx, tcg_constant_tl(count), riV); 537 } 538 539 static inline void gen_loop1r(DisasContext *ctx, TCGv RsV, int riV) 540 { 541 fIMMEXT(riV); 542 fPCALIGN(riV); 543 gen_log_reg_write(ctx, HEX_REG_LC1, RsV); 544 gen_log_reg_write(ctx, HEX_REG_SA1, tcg_constant_tl(ctx->pkt->pc + riV)); 545 } 546 547 static void gen_loop1i(DisasContext *ctx, int count, int riV) 548 { 549 gen_loop1r(ctx, tcg_constant_tl(count), riV); 550 } 551 552 static void gen_ploopNsr(DisasContext *ctx, int N, TCGv RsV, int riV) 553 { 554 fIMMEXT(riV); 555 fPCALIGN(riV); 556 gen_log_reg_write(ctx, HEX_REG_LC0, RsV); 557 gen_log_reg_write(ctx, HEX_REG_SA0, tcg_constant_tl(ctx->pkt->pc + riV)); 558 gen_set_usr_fieldi(ctx, USR_LPCFG, N); 559 gen_log_pred_write(ctx, 3, tcg_constant_tl(0)); 560 } 561 562 static void gen_ploopNsi(DisasContext *ctx, int N, int count, int riV) 563 { 564 gen_ploopNsr(ctx, N, tcg_constant_tl(count), riV); 565 } 566 567 static inline void gen_comparei(TCGCond cond, TCGv res, TCGv arg1, int arg2) 568 { 569 gen_compare(cond, res, arg1, tcg_constant_tl(arg2)); 570 } 571 #endif 572 573 static void gen_cond_jumpr(DisasContext *ctx, TCGv dst_pc, 574 TCGCond cond, TCGv pred) 575 { 576 gen_write_new_pc_addr(ctx, dst_pc, cond, pred); 577 } 578 579 static void gen_cond_jumpr31(DisasContext *ctx, TCGCond cond, TCGv pred) 580 { 581 TCGv LSB = tcg_temp_new(); 582 tcg_gen_andi_tl(LSB, pred, 1); 583 gen_cond_jumpr(ctx, hex_gpr[HEX_REG_LR], cond, LSB); 584 } 585 586 static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred, 587 int pc_off) 588 { 589 gen_write_new_pc_pcrel(ctx, pc_off, cond, pred); 590 } 591 592 static void gen_cmpnd_cmp_jmp(DisasContext *ctx, 593 int pnum, TCGCond cond1, TCGv arg1, TCGv arg2, 594 TCGCond cond2, int pc_off) 595 { 596 if (ctx->insn->part1) { 597 TCGv pred = tcg_temp_new(); 598 gen_compare(cond1, pred, arg1, arg2); 599 gen_log_pred_write(ctx, pnum, pred); 600 } else { 601 TCGv pred 
= tcg_temp_new(); 602 tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]); 603 gen_cond_jump(ctx, cond2, pred, pc_off); 604 } 605 } 606 607 static void gen_cmpnd_cmp_jmp_t(DisasContext *ctx, 608 int pnum, TCGCond cond, TCGv arg1, TCGv arg2, 609 int pc_off) 610 { 611 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_EQ, pc_off); 612 } 613 614 static void gen_cmpnd_cmp_jmp_f(DisasContext *ctx, 615 int pnum, TCGCond cond, TCGv arg1, TCGv arg2, 616 int pc_off) 617 { 618 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_NE, pc_off); 619 } 620 621 static void gen_cmpnd_cmpi_jmp_t(DisasContext *ctx, 622 int pnum, TCGCond cond, TCGv arg1, int arg2, 623 int pc_off) 624 { 625 TCGv tmp = tcg_constant_tl(arg2); 626 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_EQ, pc_off); 627 } 628 629 static void gen_cmpnd_cmpi_jmp_f(DisasContext *ctx, 630 int pnum, TCGCond cond, TCGv arg1, int arg2, 631 int pc_off) 632 { 633 TCGv tmp = tcg_constant_tl(arg2); 634 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_NE, pc_off); 635 } 636 637 static void gen_cmpnd_cmp_n1_jmp_t(DisasContext *ctx, int pnum, TCGCond cond, 638 TCGv arg, int pc_off) 639 { 640 gen_cmpnd_cmpi_jmp_t(ctx, pnum, cond, arg, -1, pc_off); 641 } 642 643 static void gen_cmpnd_cmp_n1_jmp_f(DisasContext *ctx, int pnum, TCGCond cond, 644 TCGv arg, int pc_off) 645 { 646 gen_cmpnd_cmpi_jmp_f(ctx, pnum, cond, arg, -1, pc_off); 647 } 648 649 static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx, 650 int pnum, TCGv arg, TCGCond cond, int pc_off) 651 { 652 if (ctx->insn->part1) { 653 TCGv pred = tcg_temp_new(); 654 tcg_gen_andi_tl(pred, arg, 1); 655 gen_8bitsof(pred, pred); 656 gen_log_pred_write(ctx, pnum, pred); 657 } else { 658 TCGv pred = tcg_temp_new(); 659 tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]); 660 gen_cond_jump(ctx, cond, pred, pc_off); 661 } 662 } 663 664 static void gen_testbit0_jumpnv(DisasContext *ctx, 665 TCGv arg, TCGCond cond, int pc_off) 666 { 667 TCGv pred = tcg_temp_new(); 668 
tcg_gen_andi_tl(pred, arg, 1); 669 gen_cond_jump(ctx, cond, pred, pc_off); 670 } 671 672 static void gen_jump(DisasContext *ctx, int pc_off) 673 { 674 gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL); 675 } 676 677 static void gen_jumpr(DisasContext *ctx, TCGv new_pc) 678 { 679 gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL); 680 } 681 682 static void gen_call(DisasContext *ctx, int pc_off) 683 { 684 TCGv lr = get_result_gpr(ctx, HEX_REG_LR); 685 tcg_gen_movi_tl(lr, ctx->next_PC); 686 gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL); 687 } 688 689 static void gen_callr(DisasContext *ctx, TCGv new_pc) 690 { 691 TCGv lr = get_result_gpr(ctx, HEX_REG_LR); 692 tcg_gen_movi_tl(lr, ctx->next_PC); 693 gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL); 694 } 695 696 static void gen_cond_call(DisasContext *ctx, TCGv pred, 697 TCGCond cond, int pc_off) 698 { 699 TCGv lr = get_result_gpr(ctx, HEX_REG_LR); 700 TCGv lsb = tcg_temp_new(); 701 TCGLabel *skip = gen_new_label(); 702 tcg_gen_andi_tl(lsb, pred, 1); 703 gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb); 704 tcg_gen_brcondi_tl(cond, lsb, 0, skip); 705 tcg_gen_movi_tl(lr, ctx->next_PC); 706 gen_set_label(skip); 707 } 708 709 static void gen_cond_callr(DisasContext *ctx, 710 TCGCond cond, TCGv pred, TCGv new_pc) 711 { 712 TCGv lsb = tcg_temp_new(); 713 TCGLabel *skip = gen_new_label(); 714 tcg_gen_andi_tl(lsb, pred, 1); 715 tcg_gen_brcondi_tl(cond, lsb, 0, skip); 716 gen_callr(ctx, new_pc); 717 gen_set_label(skip); 718 } 719 720 #ifndef CONFIG_HEXAGON_IDEF_PARSER 721 /* frame = ((LR << 32) | FP) ^ (FRAMEKEY << 32)) */ 722 static TCGv_i64 gen_frame_scramble(void) 723 { 724 TCGv_i64 frame = tcg_temp_new_i64(); 725 TCGv tmp = tcg_temp_new(); 726 tcg_gen_xor_tl(tmp, hex_gpr[HEX_REG_LR], hex_gpr[HEX_REG_FRAMEKEY]); 727 tcg_gen_concat_i32_i64(frame, hex_gpr[HEX_REG_FP], tmp); 728 return frame; 729 } 730 #endif 731 732 /* frame ^= (int64_t)FRAMEKEY << 32 */ 733 static void 
gen_frame_unscramble(TCGv_i64 frame) 734 { 735 TCGv_i64 framekey = tcg_temp_new_i64(); 736 tcg_gen_extu_i32_i64(framekey, hex_gpr[HEX_REG_FRAMEKEY]); 737 tcg_gen_shli_i64(framekey, framekey, 32); 738 tcg_gen_xor_i64(frame, frame, framekey); 739 } 740 741 static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA) 742 { 743 Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */ 744 CHECK_NOSHUF(EA, 8); 745 tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ); 746 } 747 748 #ifndef CONFIG_HEXAGON_IDEF_PARSER 749 /* Stack overflow check */ 750 static void gen_framecheck(TCGv EA, int framesize) 751 { 752 /* Not modelled in linux-user mode */ 753 /* Placeholder for system mode */ 754 #ifndef CONFIG_USER_ONLY 755 g_assert_not_reached(); 756 #endif 757 } 758 759 static void gen_allocframe(DisasContext *ctx, TCGv r29, int framesize) 760 { 761 TCGv r30 = tcg_temp_new(); 762 TCGv_i64 frame; 763 tcg_gen_addi_tl(r30, r29, -8); 764 frame = gen_frame_scramble(); 765 gen_store8(cpu_env, r30, frame, ctx->insn->slot); 766 gen_log_reg_write(ctx, HEX_REG_FP, r30); 767 gen_framecheck(r30, framesize); 768 tcg_gen_subi_tl(r29, r30, framesize); 769 } 770 771 static void gen_deallocframe(DisasContext *ctx, TCGv_i64 r31_30, TCGv r30) 772 { 773 TCGv r29 = tcg_temp_new(); 774 TCGv_i64 frame = tcg_temp_new_i64(); 775 gen_load_frame(ctx, frame, r30); 776 gen_frame_unscramble(frame); 777 tcg_gen_mov_i64(r31_30, frame); 778 tcg_gen_addi_tl(r29, r30, 8); 779 gen_log_reg_write(ctx, HEX_REG_SP, r29); 780 } 781 #endif 782 783 static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src) 784 { 785 /* 786 * frame = *src 787 * dst = frame_unscramble(frame) 788 * SP = src + 8 789 * PC = dst.w[1] 790 */ 791 TCGv_i64 frame = tcg_temp_new_i64(); 792 TCGv r31 = tcg_temp_new(); 793 TCGv r29 = get_result_gpr(ctx, HEX_REG_SP); 794 795 gen_load_frame(ctx, frame, src); 796 gen_frame_unscramble(frame); 797 tcg_gen_mov_i64(dst, frame); 798 tcg_gen_addi_tl(r29, src, 8); 799 tcg_gen_extrh_i64_i32(r31, 
dst); 800 gen_jumpr(ctx, r31); 801 } 802 803 /* if (pred) dst = dealloc_return(src):raw */ 804 static void gen_cond_return(DisasContext *ctx, TCGv_i64 dst, TCGv src, 805 TCGv pred, TCGCond cond) 806 { 807 TCGv LSB = tcg_temp_new(); 808 TCGLabel *skip = gen_new_label(); 809 tcg_gen_andi_tl(LSB, pred, 1); 810 811 tcg_gen_brcondi_tl(cond, LSB, 0, skip); 812 gen_return(ctx, dst, src); 813 gen_set_label(skip); 814 } 815 816 /* sub-instruction version (no RddV, so handle it manually) */ 817 static void gen_cond_return_subinsn(DisasContext *ctx, TCGCond cond, TCGv pred) 818 { 819 TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP); 820 gen_cond_return(ctx, RddV, hex_gpr[HEX_REG_FP], pred, cond); 821 gen_log_reg_write_pair(ctx, HEX_REG_FP, RddV); 822 } 823 824 static void gen_endloop0(DisasContext *ctx) 825 { 826 TCGv lpcfg = tcg_temp_new(); 827 828 GET_USR_FIELD(USR_LPCFG, lpcfg); 829 830 /* 831 * if (lpcfg == 1) { 832 * p3 = 0xff; 833 * } 834 */ 835 TCGLabel *label1 = gen_new_label(); 836 tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1); 837 { 838 gen_log_pred_write(ctx, 3, tcg_constant_tl(0xff)); 839 } 840 gen_set_label(label1); 841 842 /* 843 * if (lpcfg) { 844 * SET_USR_FIELD(USR_LPCFG, lpcfg - 1); 845 * } 846 */ 847 TCGLabel *label2 = gen_new_label(); 848 tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2); 849 { 850 tcg_gen_subi_tl(lpcfg, lpcfg, 1); 851 gen_set_usr_field(ctx, USR_LPCFG, lpcfg); 852 } 853 gen_set_label(label2); 854 855 /* 856 * If we're in a tight loop, we'll do this at the end of the TB to take 857 * advantage of direct block chaining. 
858 */ 859 if (!ctx->is_tight_loop) { 860 /* 861 * if (hex_gpr[HEX_REG_LC0] > 1) { 862 * PC = hex_gpr[HEX_REG_SA0]; 863 * hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1; 864 * } 865 */ 866 TCGLabel *label3 = gen_new_label(); 867 tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3); 868 { 869 TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0); 870 gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]); 871 tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1); 872 } 873 gen_set_label(label3); 874 } 875 } 876 877 static void gen_endloop1(DisasContext *ctx) 878 { 879 /* 880 * if (hex_gpr[HEX_REG_LC1] > 1) { 881 * PC = hex_gpr[HEX_REG_SA1]; 882 * hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1; 883 * } 884 */ 885 TCGLabel *label = gen_new_label(); 886 tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, label); 887 { 888 TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1); 889 gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]); 890 tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1); 891 } 892 gen_set_label(label); 893 } 894 895 static void gen_endloop01(DisasContext *ctx) 896 { 897 TCGv lpcfg = tcg_temp_new(); 898 TCGLabel *label1 = gen_new_label(); 899 TCGLabel *label2 = gen_new_label(); 900 TCGLabel *label3 = gen_new_label(); 901 TCGLabel *done = gen_new_label(); 902 903 GET_USR_FIELD(USR_LPCFG, lpcfg); 904 905 /* 906 * if (lpcfg == 1) { 907 * p3 = 0xff; 908 * } 909 */ 910 tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1); 911 { 912 gen_log_pred_write(ctx, 3, tcg_constant_tl(0xff)); 913 } 914 gen_set_label(label1); 915 916 /* 917 * if (lpcfg) { 918 * SET_USR_FIELD(USR_LPCFG, lpcfg - 1); 919 * } 920 */ 921 tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2); 922 { 923 tcg_gen_subi_tl(lpcfg, lpcfg, 1); 924 gen_set_usr_field(ctx, USR_LPCFG, lpcfg); 925 } 926 gen_set_label(label2); 927 928 /* 929 * if (hex_gpr[HEX_REG_LC0] > 1) { 930 * PC = hex_gpr[HEX_REG_SA0]; 931 * hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1; 932 * } else { 933 * if (hex_gpr[HEX_REG_LC1] > 1) { 934 * hex_next_pc = 
hex_gpr[HEX_REG_SA1]; 935 * hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1; 936 * } 937 * } 938 */ 939 tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3); 940 { 941 TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0); 942 gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]); 943 tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1); 944 tcg_gen_br(done); 945 } 946 gen_set_label(label3); 947 tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, done); 948 { 949 TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1); 950 gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]); 951 tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1); 952 } 953 gen_set_label(done); 954 } 955 956 static void gen_cmp_jumpnv(DisasContext *ctx, 957 TCGCond cond, TCGv val, TCGv src, int pc_off) 958 { 959 TCGv pred = tcg_temp_new(); 960 tcg_gen_setcond_tl(cond, pred, val, src); 961 gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off); 962 } 963 964 static void gen_cmpi_jumpnv(DisasContext *ctx, 965 TCGCond cond, TCGv val, int src, int pc_off) 966 { 967 TCGv pred = tcg_temp_new(); 968 tcg_gen_setcondi_tl(cond, pred, val, src); 969 gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off); 970 } 971 972 /* Shift left with saturation */ 973 static void gen_shl_sat(DisasContext *ctx, TCGv dst, TCGv src, TCGv shift_amt) 974 { 975 TCGv tmp = tcg_temp_new(); /* In case dst == src */ 976 TCGv usr = get_result_gpr(ctx, HEX_REG_USR); 977 TCGv sh32 = tcg_temp_new(); 978 TCGv dst_sar = tcg_temp_new(); 979 TCGv ovf = tcg_temp_new(); 980 TCGv satval = tcg_temp_new(); 981 TCGv min = tcg_constant_tl(0x80000000); 982 TCGv max = tcg_constant_tl(0x7fffffff); 983 984 /* 985 * Possible values for shift_amt are 0 .. 64 986 * We need special handling for values above 31 987 * 988 * sh32 = shift & 31; 989 * dst = sh32 == shift ? src : 0; 990 * dst <<= sh32; 991 * dst_sar = dst >> sh32; 992 * satval = src < 0 ? 
min : max; 993 * if (dst_asr != src) { 994 * usr.OVF |= 1; 995 * dst = satval; 996 * } 997 */ 998 999 tcg_gen_andi_tl(sh32, shift_amt, 31); 1000 tcg_gen_movcond_tl(TCG_COND_EQ, tmp, sh32, shift_amt, 1001 src, tcg_constant_tl(0)); 1002 tcg_gen_shl_tl(tmp, tmp, sh32); 1003 tcg_gen_sar_tl(dst_sar, tmp, sh32); 1004 tcg_gen_movcond_tl(TCG_COND_LT, satval, src, tcg_constant_tl(0), min, max); 1005 1006 tcg_gen_setcond_tl(TCG_COND_NE, ovf, dst_sar, src); 1007 tcg_gen_shli_tl(ovf, ovf, reg_field_info[USR_OVF].offset); 1008 tcg_gen_or_tl(usr, usr, ovf); 1009 1010 tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, tmp, satval); 1011 } 1012 1013 static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt) 1014 { 1015 /* 1016 * Shift arithmetic right 1017 * Robust when shift_amt is >31 bits 1018 */ 1019 TCGv tmp = tcg_temp_new(); 1020 tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31)); 1021 tcg_gen_sar_tl(dst, src, tmp); 1022 } 1023 1024 /* Bidirectional shift right with saturation */ 1025 static void gen_asr_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV) 1026 { 1027 TCGv shift_amt = tcg_temp_new(); 1028 TCGLabel *positive = gen_new_label(); 1029 TCGLabel *done = gen_new_label(); 1030 1031 tcg_gen_sextract_i32(shift_amt, RtV, 0, 7); 1032 tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive); 1033 1034 /* Negative shift amount => shift left */ 1035 tcg_gen_neg_tl(shift_amt, shift_amt); 1036 gen_shl_sat(ctx, RdV, RsV, shift_amt); 1037 tcg_gen_br(done); 1038 1039 gen_set_label(positive); 1040 /* Positive shift amount => shift right */ 1041 gen_sar(RdV, RsV, shift_amt); 1042 1043 gen_set_label(done); 1044 } 1045 1046 /* Bidirectional shift left with saturation */ 1047 static void gen_asl_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV) 1048 { 1049 TCGv shift_amt = tcg_temp_new(); 1050 TCGLabel *positive = gen_new_label(); 1051 TCGLabel *done = gen_new_label(); 1052 1053 tcg_gen_sextract_i32(shift_amt, RtV, 0, 7); 1054 tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, 
positive); 1055 1056 /* Negative shift amount => shift right */ 1057 tcg_gen_neg_tl(shift_amt, shift_amt); 1058 gen_sar(RdV, RsV, shift_amt); 1059 tcg_gen_br(done); 1060 1061 gen_set_label(positive); 1062 /* Positive shift amount => shift left */ 1063 gen_shl_sat(ctx, RdV, RsV, shift_amt); 1064 1065 gen_set_label(done); 1066 } 1067 1068 static void gen_insert_rp(DisasContext *ctx, TCGv RxV, TCGv RsV, TCGv_i64 RttV) 1069 { 1070 /* 1071 * int width = fZXTN(6, 32, (fGETWORD(1, RttV))); 1072 * int offset = fSXTN(7, 32, (fGETWORD(0, RttV))); 1073 * size8u_t mask = ((fCONSTLL(1) << width) - 1); 1074 * if (offset < 0) { 1075 * RxV = 0; 1076 * } else { 1077 * RxV &= ~(mask << offset); 1078 * RxV |= ((RsV & mask) << offset); 1079 * } 1080 */ 1081 1082 TCGv width = tcg_temp_new(); 1083 TCGv offset = tcg_temp_new(); 1084 TCGv_i64 mask = tcg_temp_new_i64(); 1085 TCGv_i64 result = tcg_temp_new_i64(); 1086 TCGv_i64 tmp = tcg_temp_new_i64(); 1087 TCGv_i64 offset64 = tcg_temp_new_i64(); 1088 TCGLabel *label = gen_new_label(); 1089 TCGLabel *done = gen_new_label(); 1090 1091 tcg_gen_extrh_i64_i32(width, RttV); 1092 tcg_gen_extract_tl(width, width, 0, 6); 1093 tcg_gen_extrl_i64_i32(offset, RttV); 1094 tcg_gen_sextract_tl(offset, offset, 0, 7); 1095 /* Possible values for offset are -64 .. 63 */ 1096 tcg_gen_brcondi_tl(TCG_COND_GE, offset, 0, label); 1097 /* For negative offsets, zero out the result */ 1098 tcg_gen_movi_tl(RxV, 0); 1099 tcg_gen_br(done); 1100 gen_set_label(label); 1101 /* At this point, possible values of offset are 0 .. 
 63 */
    /* mask = (1ULL << width) - 1, computed in 64 bits so width=63 is safe */
    tcg_gen_ext_i32_i64(mask, width);
    tcg_gen_shl_i64(mask, tcg_constant_i64(1), mask);
    tcg_gen_subi_i64(mask, mask, 1);
    /* result = (RxV & ~(mask << offset)) | ((RsV & mask) << offset) */
    tcg_gen_extu_i32_i64(result, RxV);
    tcg_gen_ext_i32_i64(tmp, offset);
    tcg_gen_shl_i64(tmp, mask, tmp);
    tcg_gen_andc_i64(result, result, tmp);
    tcg_gen_extu_i32_i64(tmp, RsV);
    tcg_gen_and_i64(tmp, tmp, mask);
    tcg_gen_extu_i32_i64(offset64, offset);
    tcg_gen_shl_i64(tmp, tmp, offset64);
    tcg_gen_or_i64(result, result, tmp);
    tcg_gen_extrl_i64_i32(RxV, result);
    gen_set_label(done);
}

/* Bidirectional shift of each word of RssV, truncated into halves of RdV */
static void gen_asr_r_svw_trun(DisasContext *ctx, TCGv RdV,
                               TCGv_i64 RssV, TCGv RtV)
{
    /*
     * for (int i = 0; i < 2; i++) {
     *     fSETHALF(i, RdV, fGETHALF(0, ((fSXTN(7, 32, RtV) > 0) ?
     *         (fCAST4_8s(fGETWORD(i, RssV)) >> fSXTN(7, 32, RtV)) :
     *         (fCAST4_8s(fGETWORD(i, RssV)) << -fSXTN(7, 32, RtV)))));
     * }
     */
    TCGv shift_amt32 = tcg_temp_new();
    TCGv_i64 shift_amt64 = tcg_temp_new_i64();
    TCGv_i64 tmp64 = tcg_temp_new_i64();
    TCGv tmp32 = tcg_temp_new();
    TCGLabel *label = gen_new_label();
    TCGLabel *zero = gen_new_label();
    TCGLabel *done = gen_new_label();

    /* Shift amount is the low 7 bits of RtV, sign extended */
    tcg_gen_sextract_tl(shift_amt32, RtV, 0, 7);
    /* Possible values of shift_amt32 are -64 .. 63 */
    tcg_gen_brcondi_tl(TCG_COND_LE, shift_amt32, 0, label);
    /* After branch, possible values of shift_amt32 are 1 .. 63 */
    tcg_gen_ext_i32_i64(shift_amt64, shift_amt32);
    /* Shift each word right, depositing its low half into RdV */
    for (int i = 0; i < 2; i++) {
        tcg_gen_sextract_i64(tmp64, RssV, i * 32, 32);
        tcg_gen_sar_i64(tmp64, tmp64, shift_amt64);
        tcg_gen_extrl_i64_i32(tmp32, tmp64);
        tcg_gen_deposit_tl(RdV, RdV, tmp32, i * 16, 16);
    }
    tcg_gen_br(done);
    gen_set_label(label);
    tcg_gen_neg_tl(shift_amt32, shift_amt32);
    /* At this point, possible values of shift_amt32 are 0 ..
64 */ 1151 tcg_gen_brcondi_tl(TCG_COND_GT, shift_amt32, 63, zero); 1152 /*At this point, possible values of shift_amt32 are 0 .. 63 */ 1153 tcg_gen_ext_i32_i64(shift_amt64, shift_amt32); 1154 for (int i = 0; i < 2; i++) { 1155 tcg_gen_sextract_i64(tmp64, RssV, i * 32, 32); 1156 tcg_gen_shl_i64(tmp64, tmp64, shift_amt64); 1157 tcg_gen_extrl_i64_i32(tmp32, tmp64); 1158 tcg_gen_deposit_tl(RdV, RdV, tmp32, i * 16, 16); 1159 } 1160 tcg_gen_br(done); 1161 gen_set_label(zero); 1162 /* When the shift_amt is 64, zero out the result */ 1163 tcg_gen_movi_tl(RdV, 0); 1164 gen_set_label(done); 1165 } 1166 1167 static intptr_t vreg_src_off(DisasContext *ctx, int num) 1168 { 1169 intptr_t offset = offsetof(CPUHexagonState, VRegs[num]); 1170 1171 if (test_bit(num, ctx->vregs_select)) { 1172 offset = ctx_future_vreg_off(ctx, num, 1, false); 1173 } 1174 if (test_bit(num, ctx->vregs_updated_tmp)) { 1175 offset = ctx_tmp_vreg_off(ctx, num, 1, false); 1176 } 1177 return offset; 1178 } 1179 1180 static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num, 1181 VRegWriteType type) 1182 { 1183 intptr_t dstoff; 1184 1185 if (type != EXT_TMP) { 1186 dstoff = ctx_future_vreg_off(ctx, num, 1, true); 1187 tcg_gen_gvec_mov(MO_64, dstoff, srcoff, 1188 sizeof(MMVector), sizeof(MMVector)); 1189 } else { 1190 dstoff = ctx_tmp_vreg_off(ctx, num, 1, false); 1191 tcg_gen_gvec_mov(MO_64, dstoff, srcoff, 1192 sizeof(MMVector), sizeof(MMVector)); 1193 } 1194 } 1195 1196 static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num, 1197 VRegWriteType type) 1198 { 1199 gen_log_vreg_write(ctx, srcoff, num ^ 0, type); 1200 srcoff += sizeof(MMVector); 1201 gen_log_vreg_write(ctx, srcoff, num ^ 1, type); 1202 } 1203 1204 static intptr_t get_result_qreg(DisasContext *ctx, int qnum) 1205 { 1206 if (ctx->need_commit) { 1207 return offsetof(CPUHexagonState, future_QRegs[qnum]); 1208 } else { 1209 return offsetof(CPUHexagonState, QRegs[qnum]); 1210 } 1211 } 1212 1213 static void 
gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
              bool aligned)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    if (aligned) {
        /* Mask the address down to a vector-sized boundary */
        tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
    }
    /* Load the vector 8 bytes at a time into the state at dstoff */
    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(src, src, 8);
        tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
    }
}

/* Queue a full-vector store in vstore[slot]; committed at packet end */
static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                           int slot, bool aligned)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    /* Gather stores are handled entirely by the helper */
    if (is_gather_store_insn(ctx)) {
        TCGv sl = tcg_constant_tl(slot);
        gen_helper_gather_store(cpu_env, EA, sl);
        return;
    }

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    if (aligned) {
        tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                        ~((int32_t)sizeof(MMVector) - 1));
    } else {
        tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
    }
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Set the mask to all 1's */
    tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
}

/* Queue a masked vector store; the mask comes from bitsoff */
static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                                  intptr_t bitsoff, int slot, bool invert)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    /* The address is always aligned to the vector size here */
    tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                    ~((int32_t)sizeof(MMVector) - 1));
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector),
                     sizeof(MMVector));
    /* Copy the mask */
    tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
    if (invert) {
        tcg_gen_gvec_not(MO_64, maskoff, maskoff,
                         sizeof(MMQReg), sizeof(MMQReg));
    }
}

/*
 * Convert the vector at srcoff to a qreg at dstoff:
 * each <size>-byte element yields <size> mask bits, all ones when the
 * element is non-zero, all zeros otherwise.
 */
static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 word = tcg_temp_new_i64();
    TCGv_i64 bits = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_i64 ones = tcg_constant_i64(~0);

    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
        tcg_gen_movi_i64(mask, 0);

        for (int j = 0; j < 8; j += size) {
            /* bits = (element != 0) ? ~0 : 0 */
            tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
            tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
            tcg_gen_deposit_i64(mask, mask, bits, j, size);
        }

        tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
    }
}

/* Emit a probe of the s-byte load at va in mem index mi (via helper) */
void probe_noshuf_load(TCGv va, int s, int mi)
{
    TCGv size = tcg_constant_tl(s);
    TCGv mem_idx = tcg_constant_tl(mi);
    gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx);
}

/*
 * Note: Since this function might branch, `val` is
 * required to be a `tcg_temp_local`.
1308 */ 1309 void gen_set_usr_field_if(DisasContext *ctx, int field, TCGv val) 1310 { 1311 /* Sets the USR field if `val` is non-zero */ 1312 if (reg_field_info[field].width == 1) { 1313 TCGv usr = get_result_gpr(ctx, HEX_REG_USR); 1314 TCGv tmp = tcg_temp_new(); 1315 tcg_gen_extract_tl(tmp, val, 0, reg_field_info[field].width); 1316 tcg_gen_shli_tl(tmp, tmp, reg_field_info[field].offset); 1317 tcg_gen_or_tl(usr, usr, tmp); 1318 } else { 1319 TCGLabel *skip_label = gen_new_label(); 1320 tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label); 1321 gen_set_usr_field(ctx, field, val); 1322 gen_set_label(skip_label); 1323 } 1324 } 1325 1326 void gen_sat_i32(TCGv dest, TCGv source, int width) 1327 { 1328 TCGv max_val = tcg_constant_tl((1 << (width - 1)) - 1); 1329 TCGv min_val = tcg_constant_tl(-(1 << (width - 1))); 1330 tcg_gen_smin_tl(dest, source, max_val); 1331 tcg_gen_smax_tl(dest, dest, min_val); 1332 } 1333 1334 void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width) 1335 { 1336 TCGv tmp = tcg_temp_new(); /* In case dest == source */ 1337 gen_sat_i32(tmp, source, width); 1338 tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, tmp); 1339 tcg_gen_mov_tl(dest, tmp); 1340 } 1341 1342 void gen_satu_i32(TCGv dest, TCGv source, int width) 1343 { 1344 TCGv tmp = tcg_temp_new(); /* In case dest == source */ 1345 TCGv max_val = tcg_constant_tl((1 << width) - 1); 1346 TCGv zero = tcg_constant_tl(0); 1347 tcg_gen_movcond_tl(TCG_COND_GTU, tmp, source, max_val, max_val, source); 1348 tcg_gen_movcond_tl(TCG_COND_LT, tmp, source, zero, zero, tmp); 1349 tcg_gen_mov_tl(dest, tmp); 1350 } 1351 1352 void gen_satu_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width) 1353 { 1354 TCGv tmp = tcg_temp_new(); /* In case dest == source */ 1355 gen_satu_i32(tmp, source, width); 1356 tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, tmp); 1357 tcg_gen_mov_tl(dest, tmp); 1358 } 1359 1360 void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width) 1361 { 1362 TCGv_i64 max_val = 
        tcg_constant_i64((1LL << (width - 1)) - 1LL);
    TCGv_i64 min_val = tcg_constant_i64(-(1LL << (width - 1)));
    tcg_gen_smin_i64(dest, source, max_val);
    tcg_gen_smax_i64(dest, dest, min_val);
}

/* As gen_sat_i64, additionally setting 32-bit ovfl on saturation */
void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 tmp = tcg_temp_new_i64(); /* In case dest == source */
    TCGv_i64 ovfl_64;
    gen_sat_i64(tmp, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, tmp, source);
    tcg_gen_mov_i64(dest, tmp);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}

/* 64-bit unsigned saturate to width bits */
void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 tmp = tcg_temp_new_i64(); /* In case dest == source */
    TCGv_i64 max_val = tcg_constant_i64((1LL << width) - 1LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_movcond_i64(TCG_COND_GTU, tmp, source, max_val, max_val, source);
    tcg_gen_movcond_i64(TCG_COND_LT, tmp, source, zero, zero, tmp);
    tcg_gen_mov_i64(dest, tmp);
}

/* As gen_satu_i64, additionally setting 32-bit ovfl on saturation */
void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 tmp = tcg_temp_new_i64(); /* In case dest == source */
    TCGv_i64 ovfl_64;
    gen_satu_i64(tmp, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, tmp, source);
    tcg_gen_mov_i64(dest, tmp);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}

/* Implements the fADDSAT64 macro in TCG */
void gen_add_sat_i64(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 sum = tcg_temp_new_i64();
    TCGv_i64 xor = tcg_temp_new_i64();
    TCGv_i64 cond1 = tcg_temp_new_i64();
    TCGv_i64 cond2 = tcg_temp_new_i64();
    TCGv_i64 cond3 = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL);
    TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL);
    TCGv_i64 max_neg = tcg_constant_i64(0x8000000000000000LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGLabel *no_ovfl_label = gen_new_label();
    TCGLabel *ovfl_label = gen_new_label();
    TCGLabel *ret_label = gen_new_label();

    tcg_gen_add_i64(sum, a, b);
    tcg_gen_xor_i64(xor, a, b);

    /* if (xor & mask) -- operand signs differ, so overflow is impossible */
    tcg_gen_and_i64(cond1, xor, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label);

    /* else if ((a ^ sum) & mask) -- sum's sign differs from the operands */
    tcg_gen_xor_i64(cond2, a, sum);
    tcg_gen_and_i64(cond2, cond2, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label);
    /* fallthrough to no_ovfl_label branch */

    /* if branch */
    gen_set_label(no_ovfl_label);
    tcg_gen_mov_i64(ret, sum);
    tcg_gen_br(ret_label);

    /* else if branch: saturate according to the wrapped sum's sign */
    gen_set_label(ovfl_label);
    tcg_gen_and_i64(cond3, sum, mask);
    tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg);
    gen_set_usr_fieldi(ctx, USR_OVF, 1);

    gen_set_label(ret_label);
}

#include "tcg_funcs_generated.c.inc"
#include "tcg_func_table_generated.c.inc"