/*
 * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "insn.h"
#include "opcodes.h"
#include "translate.h"
#define QEMU_GENERATE /* Used internally by macros.h */
#include "macros.h"
#include "mmvec/macros.h"
#undef QEMU_GENERATE
#include "gen_tcg.h"
#include "gen_tcg_hvx.h"
#include "genptr.h"

/* Copy GPR "num" into "result" and return "result". */
TCGv gen_read_reg(TCGv result, int num)
{
    tcg_gen_mov_tl(result, hex_gpr[num]);
    return result;
}

/* Copy predicate register "num" into "pred" and return "pred". */
TCGv gen_read_preg(TCGv pred, uint8_t num)
{
    tcg_gen_mov_tl(pred, hex_pred[num]);
    return pred;
}

#define IMMUTABLE (~0)

/*
 * Per-register masks of read-only bits: a set bit marks a bit of the
 * register that a write must not modify.  Registers not listed here
 * have a mask of 0 (fully writable).
 */
static const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = {
    [HEX_REG_USR] = 0xc13000c0,
    [HEX_REG_PC] = IMMUTABLE,
    [HEX_REG_GP] = 0x3f,
    [HEX_REG_UPCYCLELO] = IMMUTABLE,
    [HEX_REG_UPCYCLEHI] = IMMUTABLE,
    [HEX_REG_UTIMERLO] = IMMUTABLE,
    [HEX_REG_UTIMERHI] = IMMUTABLE,
};

/*
 * In place, replace the reg_mask-selected bits of new_val with the
 * corresponding bits of cur_val, so immutable bits survive the write.
 * A mask of 0 emits no code.
 */
static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val,
                                        target_ulong reg_mask)
{
    if (reg_mask) {
        TCGv tmp = tcg_temp_new();

        /* new_val = (new_val & ~reg_mask) | (cur_val & reg_mask) */
        tcg_gen_andi_tl(new_val, new_val, ~reg_mask);
        tcg_gen_andi_tl(tmp, cur_val, reg_mask);
        tcg_gen_or_tl(new_val, new_val, tmp);
    }
}

/*
 * Log a write of "val" to GPR "rnum" from the given slot, but only if
 * the slot has not been cancelled (tracked in hex_slot_cancelled).
 */
static inline void gen_log_predicated_reg_write(int rnum, TCGv val,
                                                uint32_t slot)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv slot_mask = tcg_temp_new();

    tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero,
                       val, hex_new_value[rnum]);
    if (HEX_DEBUG) {
        /*
         * Do this so HELPER(debug_commit_end) will know
         *
         * Note that slot_mask indicates the value is not written
         * (i.e., slot was cancelled), so we create a true/false value before
         * or'ing with hex_reg_written[rnum].
         */
        tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
        tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
    }
}

/*
 * Log an unconditional write of "val" to GPR "rnum", preserving any
 * immutable bits of the register.
 */
void gen_log_reg_write(int rnum, TCGv val)
{
    const target_ulong reg_mask = reg_immut_masks[rnum];

    gen_masked_reg_write(val, hex_gpr[rnum], reg_mask);
    tcg_gen_mov_tl(hex_new_value[rnum], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }
}

/*
 * Predicated (slot-cancellable) write of a 64-bit value to the register
 * pair rnum (low word) / rnum + 1 (high word).
 */
static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val,
                                              uint32_t slot)
{
    TCGv val32 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);
    TCGv slot_mask = tcg_temp_new();

    tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
    /* Low word */
    tcg_gen_extrl_i64_i32(val32, val);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum],
                       slot_mask, zero,
                       val32, hex_new_value[rnum]);
    /* High word */
    tcg_gen_extrh_i64_i32(val32, val);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1],
                       slot_mask, zero,
                       val32, hex_new_value[rnum + 1]);
    if (HEX_DEBUG) {
        /*
         * Do this so HELPER(debug_commit_end) will know
         *
         * Note that slot_mask indicates the value is not written
         * (i.e., slot was cancelled), so we create a true/false value before
         * or'ing with hex_reg_written[rnum].
         */
        tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
        tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
        tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1],
                      slot_mask);
    }
}

/*
 * Unconditional write of a 64-bit value to the register pair
 * rnum (low word) / rnum + 1 (high word), honoring immutable bits.
 */
static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
{
    const target_ulong reg_mask_low = reg_immut_masks[rnum];
    const target_ulong reg_mask_high = reg_immut_masks[rnum + 1];
    TCGv val32 = tcg_temp_new();

    /* Low word */
    tcg_gen_extrl_i64_i32(val32, val);
    gen_masked_reg_write(val32, hex_gpr[rnum], reg_mask_low);
    tcg_gen_mov_tl(hex_new_value[rnum], val32);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }

    /* High word */
    tcg_gen_extrh_i64_i32(val32, val);
    gen_masked_reg_write(val32, hex_gpr[rnum + 1], reg_mask_high);
    tcg_gen_mov_tl(hex_new_value[rnum + 1], val32);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1);
    }
}

/* Log a write to predicate register "pnum"; only the low 8 bits are kept. */
void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
{
    TCGv base_val = tcg_temp_new();

    tcg_gen_andi_tl(base_val, val, 0xff);

    /*
     * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
     *
     * Multiple writes to the same preg are and'ed together
     * If this is the first predicate write in the packet, do a
     * straight assignment.  Otherwise, do an and.
     */
    if (!test_bit(pnum, ctx->pregs_written)) {
        tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val);
    } else {
        tcg_gen_and_tl(hex_new_pred_value[pnum],
                       hex_new_pred_value[pnum], base_val);
    }
    tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);
}

/* Pack the NUM_PREGS 8-bit predicate regs into one word (P0 in bits 7:0). */
static inline void gen_read_p3_0(TCGv control_reg)
{
    tcg_gen_movi_tl(control_reg, 0);
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
    }
}

/*
 * Certain control registers require special handling on read
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> concat the 4 predicate registers together
 *     HEX_REG_PC            actual value stored in DisasContext
 *                           -> assign from ctx->base.pc_next
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                            -> add current TB changes to existing reg value
 */
static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
                                     TCGv dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_read_p3_0(dest);
    } else if (reg_num == HEX_REG_PC) {
        tcg_gen_movi_tl(dest, ctx->base.pc_next);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
    } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
    } else {
        tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
    }
}

/* 64-bit (register pair) version of gen_read_ctrl_reg; see comment above. */
static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
                                          TCGv_i64 dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv p3_0 = tcg_temp_new();
        gen_read_p3_0(p3_0);
        tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
    } else if (reg_num == HEX_REG_PC - 1) {
        /* High word of this pair is the PC, which lives in the ctx */
        TCGv pc = tcg_constant_tl(ctx->base.pc_next);
        tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        /* This pair is (pkt count, insn count) - add in-TB deltas to both */
        TCGv pkt_cnt = tcg_temp_new();
        TCGv insn_cnt = tcg_temp_new();
        tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
        tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
        tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        TCGv hvx_cnt = tcg_temp_new();
        tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
        tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
    } else {
        tcg_gen_concat_i32_i64(dest,
                               hex_gpr[reg_num],
                               hex_gpr[reg_num + 1]);
    }
}

/* Scatter one byte of control_reg into each of the predicate registers. */
static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg)
{
    TCGv hex_p8 = tcg_temp_new();
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8);
        gen_log_pred_write(ctx, i, hex_p8);
        ctx_log_pred_write(ctx, i);
    }
}

/*
 * Certain control registers require special handling on write
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> break the value across 4 predicate registers
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                            -> clear the changes
 */
static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
                                      TCGv val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_write_p3_0(ctx, val);
    } else {
        gen_log_reg_write(reg_num, val);
        ctx_log_reg_write(ctx, reg_num);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
        }
        if (reg_num == HEX_REG_QEMU_INSN_CNT) {
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

/* 64-bit (register pair) version of gen_write_ctrl_reg; see comment above. */
static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
                                           TCGv_i64 val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv val32 = tcg_temp_new();
        tcg_gen_extrl_i64_i32(val32, val);
        gen_write_p3_0(ctx, val32);
        tcg_gen_extrh_i64_i32(val32, val);
        gen_log_reg_write(reg_num + 1, val32);
        ctx_log_reg_write(ctx, reg_num + 1);
    } else {
        gen_log_reg_write_pair(reg_num, val);
        ctx_log_reg_write_pair(ctx, reg_num);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            /* The pair covers both the pkt and insn counters */
            ctx->num_packets = 0;
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

/* Extract byte N of src into result, sign- or zero-extended. */
TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 8, 8);
    } else {
        tcg_gen_extract_tl(result, src, N * 8, 8);
    }
    return result;
}

/* Extract byte N of a 64-bit src into a 32-bit result, sign/zero-extended. */
TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
{
    TCGv_i64 res64 = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_sextract_i64(res64, src, N * 8, 8);
    } else {
        tcg_gen_extract_i64(res64, src, N * 8, 8);
    }
    tcg_gen_extrl_i64_i32(result, res64);

    return result;
}

/* Extract halfword N of src into result, sign- or zero-extended. */
TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 16, 16);
    } else {
        tcg_gen_extract_tl(result, src, N * 16, 16);
    }
    return result;
}

/* Deposit src into halfword N of result. */
void gen_set_half(int N, TCGv result, TCGv src)
{
    tcg_gen_deposit_tl(result, result, src, N * 16, 16);
}

/* Deposit 32-bit src into halfword N of the 64-bit result. */
void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
}

/* Deposit 32-bit src into byte N of the 64-bit result. */
void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
}

/*
 * Load-locked word: do the load and record the address and loaded value
 * so a later store-conditional can check that the location is unchanged.
 */
static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_tl(hex_llsc_val, dest);
}

/* Load-locked doubleword; see gen_load_locked4u. */
static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_i64(hex_llsc_val_i64, dest);
}

/*
 * Store-conditional word: succeeds only if vaddr matches the reservation
 * address and memory still holds the load-locked value (checked with an
 * atomic cmpxchg).  pred is set to 0xff (predicate true) on success and 0
 * on failure; the reservation address is invalidated (~0) either way.
 */
static inline void gen_store_conditional4(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_tl(0xff);   /* canonical "true" for a predicate reg */
    zero = tcg_constant_tl(0);
    tmp = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
                              ctx->mem_idx, MO_32);
    tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
                       one, zero);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

/* Store-conditional doubleword; see gen_store_conditional4. */
static inline void gen_store_conditional8(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv_i64 src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv_i64 one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_i64(0xff);
    zero = tcg_constant_i64(0);
    tmp = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
                               ctx->mem_idx, MO_64);
    tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
                        one, zero);
    tcg_gen_extrl_i64_i32(pred, tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

/*
 * Record a 1/2/4-byte store in the per-slot store buffer
 * (address, width, and 32-bit value).
 */
void gen_store32(TCGv vaddr, TCGv src, int width, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], width);
    tcg_gen_mov_tl(hex_store_val32[slot], src);
}

/* Log a 1-byte store (cpu_env is unused here). */
void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 1, slot);
}

/* Immediate-source variant of gen_store1. */
void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store1(cpu_env, vaddr, tmp, slot);
}

/* Log a 2-byte store. */
void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 2, slot);
}

/* Immediate-source variant of gen_store2. */
void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store2(cpu_env, vaddr, tmp, slot);
}

/* Log a 4-byte store. */
void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 4, slot);
}

/* Immediate-source variant of gen_store4. */
void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store4(cpu_env, vaddr, tmp, slot);
}

/* Log an 8-byte store (value kept in the 64-bit slot buffer). */
void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], 8);
    tcg_gen_mov_i64(hex_store_val64[slot], src);
}

/* Immediate-source variant of gen_store8. */
void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, uint32_t slot)
{
    TCGv_i64 tmp = tcg_constant_i64(src);
    gen_store8(cpu_env, vaddr, tmp, slot);
}

/* result = (value != 0) ? 0xff : 0 -- canonical predicate encoding. */
TCGv gen_8bitsof(TCGv result, TCGv value)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv ones = tcg_constant_tl(0xff);
    tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);

    return result;
}

/*
 * Write "addr" to the PC.  If cond != TCG_COND_ALWAYS, the write is
 * skipped when (pred cond 0) holds.
 */
static void gen_write_new_pc_addr(DisasContext *ctx, TCGv addr,
                                  TCGCond cond, TCGv pred)
{
    TCGLabel *pred_false = NULL;
    if (cond != TCG_COND_ALWAYS) {
        pred_false = gen_new_label();
        tcg_gen_brcondi_tl(cond, pred, 0, pred_false);
    }

    if (ctx->pkt->pkt_has_multi_cof) {
        /* If there are multiple branches in a packet, ignore the second one */
        tcg_gen_movcond_tl(TCG_COND_NE, hex_gpr[HEX_REG_PC],
                           hex_branch_taken, tcg_constant_tl(0),
                           hex_gpr[HEX_REG_PC], addr);
        tcg_gen_movi_tl(hex_branch_taken, 1);
    } else {
        tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], addr);
    }

    if (cond != TCG_COND_ALWAYS) {
        gen_set_label(pred_false);
    }
}

/* PC-relative variant of gen_write_new_pc_addr (target = pkt->pc + pc_off). */
static void gen_write_new_pc_pcrel(DisasContext *ctx, int pc_off,
                                   TCGCond cond, TCGv pred)
{
    target_ulong dest = ctx->pkt->pc + pc_off;
    if (ctx->pkt->pkt_has_multi_cof) {
        gen_write_new_pc_addr(ctx, tcg_constant_tl(dest), cond, pred);
    } else {
        /* Defer this jump to the end of the TB */
        ctx->branch_cond = TCG_COND_ALWAYS;
        if (pred != NULL) {
            ctx->branch_cond = cond;
            tcg_gen_mov_tl(hex_branch_taken, pred);
        }
        ctx->branch_dest = dest;
    }
}

/* Deposit "val" into the given USR bit field (of the new USR value). */
void gen_set_usr_field(int field, TCGv val)
{
    tcg_gen_deposit_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR],
                       val,
                       reg_field_info[field].offset,
                       reg_field_info[field].width);
}

/* Immediate variant; single-bit fields just set/clear the bit. */
void gen_set_usr_fieldi(int field, int x)
{
    if (reg_field_info[field].width == 1) {
        target_ulong bit = 1 << reg_field_info[field].offset;
        if ((x & 1) == 1) {
            tcg_gen_ori_tl(hex_new_value[HEX_REG_USR],
                           hex_new_value[HEX_REG_USR],
                           bit);
        } else {
            tcg_gen_andi_tl(hex_new_value[HEX_REG_USR],
                            hex_new_value[HEX_REG_USR],
                            ~bit);
        }
    } else {
        TCGv val = tcg_constant_tl(x);
        gen_set_usr_field(field, val);
    }
}

/* res = (arg1 cond arg2) ? 0xff : 0 */
static void gen_compare(TCGCond cond, TCGv res, TCGv arg1, TCGv arg2)
{
    TCGv one = tcg_constant_tl(0xff);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(cond, res, arg1, arg2, one, zero);
}

/* Register-indirect conditional jump. */
static void gen_cond_jumpr(DisasContext *ctx, TCGv dst_pc,
                           TCGCond cond, TCGv pred)
{
    gen_write_new_pc_addr(ctx, dst_pc, cond, pred);
}

/* PC-relative conditional jump; skipped when (pred cond 0) holds. */
static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred,
                          int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, cond, pred);
}

/*
 * Compound compare-and-jump.  part1 of the instruction computes the
 * predicate (arg1 cond1 arg2); the other part reads the new predicate
 * value and jumps unless (pred cond2 0) holds.
 */
static void gen_cmpnd_cmp_jmp(DisasContext *ctx,
                              int pnum, TCGCond cond1, TCGv arg1, TCGv arg2,
                              TCGCond cond2, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        gen_compare(cond1, pred, arg1, arg2);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
        gen_cond_jump(ctx, cond2, pred, pc_off);
    }
}

/* Compound compare-and-jump, jump when the predicate is true. */
static void gen_cmpnd_cmp_jmp_t(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_EQ, pc_off);
}

/* Compound compare-and-jump, jump when the predicate is false. */
static void gen_cmpnd_cmp_jmp_f(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_NE, pc_off);
}

/* Immediate-operand variant of gen_cmpnd_cmp_jmp_t. */
static void gen_cmpnd_cmpi_jmp_t(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_EQ, pc_off);
}

/* Immediate-operand variant of gen_cmpnd_cmp_jmp_f. */
static void gen_cmpnd_cmpi_jmp_f(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_NE, pc_off);
}

/* Compare against -1 variants of the compound compare-and-jump. */
static void gen_cmpnd_cmp_n1_jmp_t(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_t(ctx, pnum, cond, arg, -1, pc_off);
}

static void gen_cmpnd_cmp_n1_jmp_f(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_f(ctx, pnum, cond, arg, -1, pc_off);
}

/* Compound test-bit-0-and-jump: part1 computes pred = (arg & 1) ? 0xff : 0. */
static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx,
                                  int pnum, TCGv arg, TCGCond cond, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        tcg_gen_andi_tl(pred, arg, 1);
        gen_8bitsof(pred, pred);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
        gen_cond_jump(ctx, cond, pred, pc_off);
    }
}

/* New-value test-bit-0-and-jump. */
static void gen_testbit0_jumpnv(DisasContext *ctx,
                                TCGv arg, TCGCond cond, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_andi_tl(pred, arg, 1);
    gen_cond_jump(ctx, cond, pred, pc_off);
}

/* Unconditional PC-relative jump. */
static void gen_jump(DisasContext *ctx, int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

/* Unconditional register-indirect jump. */
static void gen_jumpr(DisasContext *ctx, TCGv new_pc)
{
    gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
}

/* Call: write the return address (end of packet) to LR, then jump. */
static void gen_call(DisasContext *ctx, int pc_off)
{
    TCGv next_PC =
        tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes);
    gen_log_reg_write(HEX_REG_LR, next_PC);
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

/* Conditional call: the LR write is skipped when the call is not taken. */
static void gen_cond_call(DisasContext *ctx, TCGv pred,
                          TCGCond cond, int pc_off)
{
    TCGv next_PC;
    TCGv lsb = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(lsb, pred, 1);
    gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb);
    tcg_gen_brcondi_tl(cond, lsb, 0, skip);
    next_PC =
        tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes);
    gen_log_reg_write(HEX_REG_LR, next_PC);
    gen_set_label(skip);
}

/* Hardware loop 0 end-of-loop processing (lpcfg update and back-branch). */
static void gen_endloop0(DisasContext *ctx)
{
    TCGv lpcfg = tcg_temp_new();

    GET_USR_FIELD(USR_LPCFG, lpcfg);

    /*
     * if (lpcfg == 1) {
     *     hex_new_pred_value[3] = 0xff;
     *     hex_pred_written |= 1 << 3;
     * }
     */
    TCGLabel *label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
    {
        tcg_gen_movi_tl(hex_new_pred_value[3], 0xff);
        tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3);
    }
    gen_set_label(label1);

    /*
     * if (lpcfg) {
     *     SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
     * }
     */
    TCGLabel *label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
    {
        tcg_gen_subi_tl(lpcfg, lpcfg, 1);
        SET_USR_FIELD(USR_LPCFG, lpcfg);
    }
    gen_set_label(label2);

    /*
     * If we're in a tight loop, we'll do this at the end of the TB to take
     * advantage of direct block chaining.
     */
    if (!ctx->is_tight_loop) {
        /*
         * if (hex_gpr[HEX_REG_LC0] > 1) {
         *     PC = hex_gpr[HEX_REG_SA0];
         *     hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
         * }
         */
        TCGLabel *label3 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
        {
            gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
            tcg_gen_subi_tl(hex_new_value[HEX_REG_LC0],
                            hex_gpr[HEX_REG_LC0], 1);
        }
        gen_set_label(label3);
    }
}

/* New-value compare-and-jump (jump taken when the comparison holds). */
static void gen_cmp_jumpnv(DisasContext *ctx,
                           TCGCond cond, TCGv val, TCGv src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcond_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}

/* Immediate-operand variant of gen_cmp_jumpnv. */
static void gen_cmpi_jumpnv(DisasContext *ctx,
                            TCGCond cond, TCGv val, int src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcondi_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}

/* Shift left with saturation */
static void gen_shl_sat(TCGv dst, TCGv src, TCGv shift_amt)
{
    TCGv sh32 = tcg_temp_new();
    TCGv dst_sar = tcg_temp_new();
    TCGv ovf = tcg_temp_new();
    TCGv satval = tcg_temp_new();
    TCGv min = tcg_constant_tl(0x80000000);
    TCGv max = tcg_constant_tl(0x7fffffff);

    /*
     * Possible values for shift_amt are 0 .. 64
     * We need special handling for values above 31
     *
     * sh32 = shift & 31;
     * dst = sh32 == shift ? src : 0;
     * dst <<= sh32;
     * dst_sar = dst >> sh32;
     * satval = src < 0 ? min : max;
     * if (dst_sar != src) {
     *     usr.OVF |= 1;
     *     dst = satval;
     * }
     */

    tcg_gen_andi_tl(sh32, shift_amt, 31);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, sh32, shift_amt,
                       src, tcg_constant_tl(0));
    tcg_gen_shl_tl(dst, dst, sh32);
    tcg_gen_sar_tl(dst_sar, dst, sh32);
    tcg_gen_movcond_tl(TCG_COND_LT, satval, src, tcg_constant_tl(0), min, max);

    tcg_gen_setcond_tl(TCG_COND_NE, ovf, dst_sar, src);
    tcg_gen_shli_tl(ovf, ovf, reg_field_info[USR_OVF].offset);
    tcg_gen_or_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR], ovf);

    tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, dst, satval);
}

static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt)
{
    /*
     * Shift arithmetic right
     * Robust when shift_amt is >31 bits
     */
    TCGv tmp = tcg_temp_new();
    tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31));
    tcg_gen_sar_tl(dst, src, tmp);
}

/* Bidirectional shift right with saturation */
static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift left */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_shl_sat(RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift right */
    gen_sar(RdV, RsV, shift_amt);

    gen_set_label(done);
}

/* Bidirectional shift left with saturation */
static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift right */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_sar(RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift left */
    gen_shl_sat(RdV, RsV, shift_amt);

    gen_set_label(done);
}

/*
 * Source offset of HVX vector register "num", honoring any earlier
 * writes in the packet (future/tmp copies selected via the ctx bitmaps).
 */
static intptr_t vreg_src_off(DisasContext *ctx, int num)
{
    intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);

    if (test_bit(num, ctx->vregs_select)) {
        offset = ctx_future_vreg_off(ctx, num, 1, false);
    }
    if (test_bit(num, ctx->vregs_updated_tmp)) {
        offset = ctx_tmp_vreg_off(ctx, num, 1, false);
    }
    return offset;
}

/* Log a write to HVX vector register "num" (future or tmp destination). */
static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
                               VRegWriteType type, int slot_num,
                               bool is_predicated)
{
    TCGLabel *label_end = NULL;
    intptr_t dstoff;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
    }

    if (type != EXT_TMP) {
        dstoff = ctx_future_vreg_off(ctx, num, 1, true);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
        tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num);
    } else {
        dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    }

    if (is_predicated) {
        gen_set_label(label_end);
    }
}

/* Log a write to an HVX vector register pair (num ^ 0 then num ^ 1). */
static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
                                    VRegWriteType type, int slot_num,
                                    bool is_predicated)
{
    gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated);
    srcoff += sizeof(MMVector);
    gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated);
}

/*
 * Log a write to HVX predicate (Q) register "num".
 * NOTE(review): the "vnew" parameter is unused in this function.
 */
static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew,
                               int slot_num, bool is_predicated)
{
    TCGLabel *label_end = NULL;
    intptr_t dstoff;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
    }

    dstoff = offsetof(CPUHexagonState, future_QRegs[num]);
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg));

    if (is_predicated) {
        tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num);
        gen_set_label(label_end);
    }
}

/*
 * Load a full HVX vector from memory in 8-byte chunks.
 * Note: "src" (the address) is advanced past the vector as a side effect,
 * and is masked down to a vector-aligned address when "aligned" is set.
 */
static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
                          bool aligned)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    if (aligned) {
        tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
    }
    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
        tcg_gen_addi_tl(src, src, 8);
        tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
    }
}

/*
 * Queue a full HVX vector store in the per-slot vstore buffer
 * (gather stores are instead handled by a helper).
 */
static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                           int slot, bool aligned)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    if (is_gather_store_insn(ctx)) {
        TCGv sl = tcg_constant_tl(slot);
        gen_helper_gather_store(cpu_env, EA, sl);
        return;
    }

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    if (aligned) {
        tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                        ~((int32_t)sizeof(MMVector) - 1));
    } else {
        tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
    }
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Set the mask to all 1's */
    tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
}

/* Queue a masked HVX vector store; the mask comes from "bitsoff". */
static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                                  intptr_t bitsoff, int slot, bool invert)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                    ~((int32_t)sizeof(MMVector) - 1));
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Copy the mask */
    tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
    if (invert) {
        tcg_gen_gvec_not(MO_64, maskoff, maskoff,
                         sizeof(MMQReg), sizeof(MMQReg));
    }
}

/*
 * Convert a vector to a predicate (qreg): for each "size"-byte element,
 * set the corresponding "size" bits of the destination to all ones if
 * the element is nonzero, otherwise to zero.
 */
static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 word = tcg_temp_new_i64();
    TCGv_i64 bits = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_i64 ones = tcg_constant_i64(~0);

    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
        tcg_gen_movi_i64(mask, 0);

        for (int j = 0; j < 8; j += size) {
            tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
            tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
            tcg_gen_deposit_i64(mask, mask, bits, j, size);
        }

        tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
    }
}

/* Probe a load address (mem_noshuf handling) via helper. */
void probe_noshuf_load(TCGv va, int s, int mi)
{
    TCGv size = tcg_constant_tl(s);
    TCGv mem_idx = tcg_constant_tl(mi);
    gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx);
}

/*
 * Note: Since this function might branch, `val` is
 * required to be a `tcg_temp_local`.
 */
void gen_set_usr_field_if(int field, TCGv val)
{
    /* Sets the USR field if `val` is non-zero */
    if (reg_field_info[field].width == 1) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_extract_tl(tmp, val, 0, reg_field_info[field].width);
        tcg_gen_shli_tl(tmp, tmp, reg_field_info[field].offset);
        tcg_gen_or_tl(hex_new_value[HEX_REG_USR],
                      hex_new_value[HEX_REG_USR],
                      tmp);
    } else {
        TCGLabel *skip_label = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label);
        gen_set_usr_field(field, val);
        gen_set_label(skip_label);
    }
}

/*
 * Saturate source to a signed width-bit value.
 * NOTE(review): assumes width < 32; `1 << (width - 1)` would overflow for
 * width == 32 -- confirm callers.
 */
void gen_sat_i32(TCGv dest, TCGv source, int width)
{
    TCGv max_val = tcg_constant_tl((1 << (width - 1)) - 1);
    TCGv min_val = tcg_constant_tl(-(1 << (width - 1)));
    tcg_gen_smin_tl(dest, source, max_val);
    tcg_gen_smax_tl(dest, dest, min_val);
}

/* Saturating version that also reports overflow (ovfl = saturated?). */
void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    gen_sat_i32(dest, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
}

/*
 * Saturate source to an unsigned width-bit value (negative inputs clamp
 * to 0).  NOTE(review): assumes width < 32 (`1 << width`).
 */
void gen_satu_i32(TCGv dest, TCGv source, int width)
{
    TCGv max_val = tcg_constant_tl((1 << width) - 1);
    TCGv zero = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_GTU, dest, source, max_val, max_val, source);
    tcg_gen_movcond_tl(TCG_COND_LT, dest, source, zero, zero, dest);
}

/* Unsigned saturating version that also reports overflow. */
void gen_satu_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    gen_satu_i32(dest, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
}

/* 64-bit signed saturation (width < 64). */
void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 max_val = tcg_constant_i64((1LL << (width - 1)) - 1LL);
    TCGv_i64 min_val = tcg_constant_i64(-(1LL << (width - 1)));
    tcg_gen_smin_i64(dest, source, max_val);
    tcg_gen_smax_i64(dest, dest, min_val);
}

/* 64-bit signed saturation with overflow flag (32-bit ovfl output). */
void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 ovfl_64;
    gen_sat_i64(dest, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}

/* 64-bit unsigned saturation (width < 64). */
void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 max_val = tcg_constant_i64((1LL << width) - 1LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_movcond_i64(TCG_COND_GTU, dest, source, max_val, max_val, source);
    tcg_gen_movcond_i64(TCG_COND_LT, dest, source, zero, zero, dest);
}

/* 64-bit unsigned saturation with overflow flag (32-bit ovfl output). */
void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 ovfl_64;
    gen_satu_i64(dest, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}

/* Implements the fADDSAT64 macro in TCG */
void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 sum = tcg_temp_new_i64();
    TCGv_i64 xor = tcg_temp_new_i64();
    TCGv_i64 cond1 = tcg_temp_new_i64();
    TCGv_i64 cond2 = tcg_temp_new_i64();
    TCGv_i64 cond3 = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL);
    TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL);
    TCGv_i64 max_neg = tcg_constant_i64(0x8000000000000000LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGLabel *no_ovfl_label = gen_new_label();
    TCGLabel *ovfl_label = gen_new_label();
    TCGLabel *ret_label = gen_new_label();

    tcg_gen_add_i64(sum, a, b);
    tcg_gen_xor_i64(xor, a, b);

    /* if (xor & mask) */
    tcg_gen_and_i64(cond1, xor, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label);

    /* else if ((a ^ sum) & mask) */
    tcg_gen_xor_i64(cond2, a, sum);
    tcg_gen_and_i64(cond2, cond2, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label);
    /* fallthrough to no_ovfl_label branch */

    /* if branch */
    gen_set_label(no_ovfl_label);
    tcg_gen_mov_i64(ret, sum);
    tcg_gen_br(ret_label);

    /* else if branch */
    gen_set_label(ovfl_label);
    tcg_gen_and_i64(cond3, sum, mask);
    tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg);
    SET_USR_FIELD(USR_OVF, 1);

    gen_set_label(ret_label);
}

#include "tcg_funcs_generated.c.inc"
#include "tcg_func_table_generated.c.inc"