/*
 * OpenRISC translation
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Feng Gao <gf91597@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/gen-icount.h"

#include "trace-tcg.h"
#include "exec/log.h"

/* is_jmp field values */
#define DISAS_EXIT    DISAS_TARGET_0  /* force exit to main loop */
#define DISAS_JUMP    DISAS_TARGET_1  /* exit via jmp_pc/jmp_pc_imm */

typedef struct DisasContext {
    DisasContextBase base;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;

    /* If not -1, jmp_pc contains this value and so is a direct jump.  */
    target_ulong jmp_pc_imm;
} DisasContext;

static inline bool is_user(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return !(dc->tb_flags & TB_FLAGS_SM);
#endif
}

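/*
 * Note: "decode.inc.c" below is generated by scripts/decodetree.py from
 * the instruction pattern description.  It defines the arg_* argument
 * structures and a single entry point, static bool decode(DisasContext *,
 * uint32_t insn), which extracts the operand fields and dispatches to the
 * trans_* callbacks in this file, returning false for unrecognized
 * encodings.
 */
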
/* Include the auto-generated decoder.  */
#include "decode.inc.c"

static TCGv cpu_sr;
static TCGv cpu_R[32];
static TCGv cpu_R0;
static TCGv cpu_pc;
static TCGv jmp_pc;            /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;          /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;         /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;         /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;       /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;

void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState,
                                               shadow_gpr[0][i]),
                                      regnames[i]);
    }
    cpu_R0 = cpu_R[0];
}

static void gen_exception(DisasContext *dc, unsigned int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
}

/* not used yet, open it when we need or64.  */
/*#ifdef TARGET_OPENRISC64
static void check_ob64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OB64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_of64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OF64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_ov64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OV64S)) {
        gen_illegal_exception(dc);
    }
}
#endif*/

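/*
 * R0 handling: when TB_FLAGS_R0_0 is set, openrisc_tr_tb_start() replaces
 * cpu_R[0] with a constant-zero temporary so the optimizer can fold reads
 * of r0.  Any instruction that names r0 as its destination must first put
 * the architectural register back, which is what check_r0_write() does.
 */
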
/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)

static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
    }
}

static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

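/*
 * The sign-bit trick used above: for addition, signed overflow occurs
 * exactly when both operands have the same sign and the result's sign
 * differs, i.e. (res ^ srcb) & ~(srca ^ srcb) has its top bit set.
 * E.g. 0x7fffffff + 1 = 0x80000000: srca ^ srcb has the top bit clear
 * (same sign), res ^ srcb has it set, so SR[OV] is flagged.  For
 * subtraction the analogous condition is (res ^ srcb) & (srca ^ srcb).
 * Only the most significant bit of cpu_sr_ov carries the result here;
 * the gen_ove_* helpers are expected to test just that bit when the
 * SR[OVE] overflow exception is enabled.
 */
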
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}

static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    /* Unsigned overflow iff the high half of the unsigned widening
       product is non-zero, so the unsigned multiply is wanted here.  */
    tcg_gen_mulu2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}

static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}

static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}

static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

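/*
 * l.mac/l.msb accumulate a signed 64-bit product into the MACHI:MACLO
 * pair, modelled here as the single 64-bit global cpu_mac.  The sign-bit
 * xor trick from gen_add/gen_sub reappears below, applied at 64 bits to
 * the accumulation step, since that is the only point where the result
 * can overflow.
 */
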
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static bool trans_l_add(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_add(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_addc(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sub(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_sub(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_and(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_and_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_or(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_or_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_xor(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_xor_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sll(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_shl_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_srl(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_shr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sra(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_sar_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_ror(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_rotr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_exths(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext16s_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

static bool trans_l_extbs(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext8s_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

static bool trans_l_exthz(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext16u_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

static bool trans_l_extbz(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext8u_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
{
    TCGv zero;

    check_r0_write(a->d);
    zero = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[a->d], cpu_sr_f, zero,
                       cpu_R[a->a], cpu_R[a->b]);
    tcg_temp_free(zero);
    return true;
}

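/*
 * l.ff1 returns the 1-based position of the least significant set bit,
 * or 0 when no bit is set.  tcg_gen_ctzi_tl() with a fallback of -1
 * yields exactly pos-1 (or -1 for zero input), so adding 1 produces the
 * architectural result.  l.fl1 similarly counts from the MSB: clz of a
 * zero input is defined here as TARGET_LONG_BITS, and subtracting from
 * TARGET_LONG_BITS maps that back to 0.
 */
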
static bool trans_l_ff1(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ctzi_tl(cpu_R[a->d], cpu_R[a->a], -1);
    tcg_gen_addi_tl(cpu_R[a->d], cpu_R[a->d], 1);
    return true;
}

static bool trans_l_fl1(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_clzi_tl(cpu_R[a->d], cpu_R[a->a], TARGET_LONG_BITS);
    tcg_gen_subfi_tl(cpu_R[a->d], TARGET_LONG_BITS, cpu_R[a->d]);
    return true;
}

static bool trans_l_mul(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_mulu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_mulu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_div(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_div(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_divu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_divu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_muld(DisasContext *dc, arg_ab *a)
{
    gen_muld(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
{
    gen_muldu(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

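/*
 * Branch protocol: a taken branch or jump stores its target in jmp_pc
 * and sets delayed_branch = 2.  openrisc_tr_translate_insn() decrements
 * the counter after each instruction, so it reaches 0 just after the
 * delay slot has been translated, at which point the TB ends with
 * DISAS_JUMP and control transfers through jmp_pc (or jmp_pc_imm when
 * the target is a compile-time constant).
 */
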
static bool trans_l_j(DisasContext *dc, arg_l_j *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;

    tcg_gen_movi_tl(jmp_pc, tmp_pc);
    dc->jmp_pc_imm = tmp_pc;
    dc->delayed_branch = 2;
    return true;
}

static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    target_ulong ret_pc = dc->base.pc_next + 8;

    tcg_gen_movi_tl(cpu_R[9], ret_pc);
    /* Optimize jal being used to load the PC for PIC.  */
    if (tmp_pc != ret_pc) {
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        dc->jmp_pc_imm = tmp_pc;
        dc->delayed_branch = 2;
    }
    return true;
}

static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    TCGv t_next = tcg_const_tl(dc->base.pc_next + 8);
    TCGv t_true = tcg_const_tl(tmp_pc);
    TCGv t_zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

    tcg_temp_free(t_next);
    tcg_temp_free(t_true);
    tcg_temp_free(t_zero);
    dc->delayed_branch = 2;
}

static bool trans_l_bf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_NE);
    return true;
}

static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_EQ);
    return true;
}

static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    dc->delayed_branch = 2;
    return true;
}

static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8);
    dc->delayed_branch = 2;
    return true;
}

static bool trans_l_lwa(DisasContext *dc, arg_load *a)
{
    TCGv ea;

    check_r0_write(a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, cpu_R[a->d]);
    tcg_temp_free(ea);
    return true;
}

static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop)
{
    TCGv ea;

    check_r0_write(a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, mop);
    tcg_temp_free(ea);
}

static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESL);
    return true;
}

static bool trans_l_lbz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_UB);
    return true;
}

static bool trans_l_lbs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_SB);
    return true;
}

static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUW);
    return true;
}

static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESW);
    return true;
}

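/*
 * l.lwa/l.swa form a load-linked/store-conditional pair.  l.lwa above
 * records the address and loaded value in cpu_lock_addr/cpu_lock_value;
 * l.swa below succeeds only if it targets the same address and an atomic
 * cmpxchg still observes the recorded value, which is how the reservation
 * is emulated on top of the host memory model.  Any l.swa, successful or
 * not, clears the reservation.
 */
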
static bool trans_l_swa(DisasContext *dc, arg_store *a)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    cpu_R[0] = cpu_R0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[a->b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
    return true;
}

static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[a->a], a->i);
    tcg_gen_qemu_st_tl(cpu_R[a->b], t0, dc->mem_idx, mop);
    tcg_temp_free(t0);
}

static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_sb(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_UB);
    return true;
}

static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUW);
    return true;
}

static bool trans_l_nop(DisasContext *dc, arg_l_nop *a)
{
    return true;
}

static bool trans_l_addi(DisasContext *dc, arg_rri *a)
{
    TCGv t0;

    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_add(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_l_addic(DisasContext *dc, arg_rri *a)
{
    TCGv t0;

    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_l_muli(DisasContext *dc, arg_rri *a)
{
    TCGv t0;

    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
{
    TCGv t0;

    t0 = tcg_const_tl(a->i);
    gen_mac(dc, cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(a->d);
    tcg_gen_andi_tl(cpu_R[a->d], cpu_R[a->a], a->k);
    return true;
}

static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(a->d);
    tcg_gen_ori_tl(cpu_R[a->d], cpu_R[a->a], a->k);
    return true;
}

static bool trans_l_xori(DisasContext *dc, arg_rri *a)
{
    check_r0_write(a->d);
    tcg_gen_xori_tl(cpu_R[a->d], cpu_R[a->a], a->i);
    return true;
}

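/*
 * l.mfspr/l.mtspr are privileged: in user mode they raise the illegal
 * instruction exception.  l.mtspr must also assume the worst about what
 * was written (e.g. SR or NPC), so the translator saves the PC state and
 * exits the TB with DISAS_EXIT to let the main loop re-evaluate.
 */
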
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
{
    check_r0_write(a->d);

    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
        gen_helper_mfspr(cpu_R[a->d], cpu_env, cpu_R[a->d], spr);
        tcg_temp_free(spr);
    }
    return true;
}

static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr;

        /* For SR, we will need to exit the TB to recognize the new
         * exception state.  For NPC, in theory this counts as a branch
         * (although the SPR only exists for use by an ICE).  Save all
         * of the cpu state first, allowing it to be overwritten.
         */
        if (dc->delayed_branch) {
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
        }
        dc->base.is_jmp = DISAS_EXIT;

        spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
        gen_helper_mtspr(cpu_env, spr, cpu_R[a->b]);
        tcg_temp_free(spr);
    }
    return true;
}

static bool trans_l_mac(DisasContext *dc, arg_ab *a)
{
    gen_mac(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_msb(DisasContext *dc, arg_ab *a)
{
    gen_msb(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_macu(DisasContext *dc, arg_ab *a)
{
    gen_macu(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
{
    gen_msbu(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_slli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_shli_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_shri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srai(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_sari_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_rori(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_rotri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
{
    check_r0_write(a->d);
    tcg_gen_movi_tl(cpu_R[a->d], a->k << 16);
    return true;
}

static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
{
    check_r0_write(a->d);
    tcg_gen_trunc_i64_tl(cpu_R[a->d], cpu_mac);
    tcg_gen_movi_i64(cpu_mac, 0);
    return true;
}

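/*
 * The l.sf* family below writes its result into the dedicated SR[F]
 * global (cpu_sr_f) rather than a GPR; l.bf/l.bnf then branch on that
 * flag via do_bf() above.  Keeping the flag in its own TCG global lets
 * the setcond feed the branch movcond directly, without rebuilding the
 * full SR image.
 */
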
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}

static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_SYSCALL);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_TRAP);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool trans_l_msync(DisasContext *dc, arg_l_msync *a)
{
    tcg_gen_mb(TCG_MO_ALL);
    return true;
}

static bool trans_l_psync(DisasContext *dc, arg_l_psync *a)
{
    return true;
}

static bool trans_l_csync(DisasContext *dc, arg_l_csync *a)
{
    return true;
}

static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        gen_helper_rfe(cpu_env);
        dc->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

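/*
 * Only eq/lt/le comparison helpers exist for single-precision floats;
 * do_fpcmp() below synthesizes the rest: "not equal" inverts eq, and
 * gt/ge swap the operands of lt/le.  The result again lands in SR[F].
 */
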
static void do_fp2(DisasContext *dc, arg_da *a,
                   void (*fn)(TCGv, TCGv_env, TCGv))
{
    check_r0_write(a->d);
    fn(cpu_R[a->d], cpu_env, cpu_R[a->a]);
    gen_helper_update_fpcsr(cpu_env);
}

static void do_fp3(DisasContext *dc, arg_dab *a,
                   void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
{
    check_r0_write(a->d);
    fn(cpu_R[a->d], cpu_env, cpu_R[a->a], cpu_R[a->b]);
    gen_helper_update_fpcsr(cpu_env);
}

static void do_fpcmp(DisasContext *dc, arg_ab *a,
                     void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
                     bool inv, bool swap)
{
    if (swap) {
        fn(cpu_sr_f, cpu_env, cpu_R[a->b], cpu_R[a->a]);
    } else {
        fn(cpu_sr_f, cpu_env, cpu_R[a->a], cpu_R[a->b]);
    }
    if (inv) {
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    }
    gen_helper_update_fpcsr(cpu_env);
}

static bool trans_lf_add_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_add_s);
    return true;
}

static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_sub_s);
    return true;
}

static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_mul_s);
    return true;
}

static bool trans_lf_div_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_div_s);
    return true;
}

static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_rem_s);
    return true;
}

static bool trans_lf_itof_s(DisasContext *dc, arg_da *a)
{
    do_fp2(dc, a, gen_helper_itofs);
    return true;
}

static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a)
{
    do_fp2(dc, a, gen_helper_ftois);
    return true;
}

static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_helper_float_madd_s(cpu_R[a->d], cpu_env, cpu_R[a->d],
                            cpu_R[a->a], cpu_R[a->b]);
    gen_helper_update_fpcsr(cpu_env);
    return true;
}

static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
    return true;
}

static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
    return true;
}

static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
    return true;
}

static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
    return true;
}

static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
    return true;
}

static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
    return true;
}

static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cs->env_ptr;
    int bound;

    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->jmp_pc_imm = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    DisasContext *dc = container_of(db, DisasContext, base);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    } else {
        cpu_R[0] = cpu_R0;
    }
}

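/*
 * The two words recorded by tcg_gen_insn_start() below are the ones
 * recovered in restore_state_to_opc() at the end of this file:
 * data[0] is the PC, bit 0 of data[1] is the delay-slot flag, and
 * bit 1 indicates that PPC can be reconstructed as pc - 4.
 */
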
static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
}

static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                         const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    dc->base.pc_next += 4;
    return true;
}

static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    uint32_t insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);

    if (!decode(dc, insn)) {
        gen_illegal_exception(dc);
    }
    dc->base.pc_next += 4;

    /* When exiting the delay slot normally, exit via jmp_pc.
     * For DISAS_NORETURN, we have raised an exception and already exited.
     * For DISAS_EXIT, we found l.rfe in a delay slot.  There's nothing
     * in the manual saying this is illegal, but surely it should be.
     * At least or1ksim overrides pcnext and ignores the branch.
     */
    if (dc->delayed_branch
        && --dc->delayed_branch == 0
        && dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_JUMP;
    }
}

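/*
 * TB exit policy in tb_stop below: direct jumps within the same guest
 * page may use the patchable goto_tb/exit_tb pair, so consecutive TBs
 * can be chained in place; cross-page or computed targets go through
 * tcg_gen_lookup_and_goto_ptr(), which re-checks the mapping; and
 * single-stepping always ends with EXCP_DEBUG so the debugger regains
 * control after every instruction.
 */
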
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong jmp_dest;

    /* If we have already exited the TB, nothing following has effect.  */
    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }

    /* Adjust the delayed branch state for the next TB.  */
    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    /* For DISAS_TOO_MANY, jump to the next insn.  */
    jmp_dest = dc->base.pc_next;
    tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);

    switch (dc->base.is_jmp) {
    case DISAS_JUMP:
        jmp_dest = dc->jmp_pc_imm;
        if (jmp_dest == -1) {
            /* The jump destination is indirect/computed; use jmp_pc.  */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            if (unlikely(dc->base.singlestep_enabled)) {
                gen_exception(dc, EXCP_DEBUG);
            } else {
                tcg_gen_lookup_and_goto_ptr();
            }
            break;
        }
        /* The jump destination is direct; use jmp_pc_imm.
           However, we will have stored into jmp_pc as well;
           we know now that it wasn't needed.  */
        tcg_gen_discard_tl(jmp_pc);
        /* fallthru */

    case DISAS_TOO_MANY:
        if (unlikely(dc->base.singlestep_enabled)) {
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            gen_exception(dc, EXCP_DEBUG);
        } else if ((dc->base.pc_first ^ jmp_dest) & TARGET_PAGE_MASK) {
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_exit_tb(dc->base.tb, 0);
        }
        break;

    case DISAS_EXIT:
        if (unlikely(dc->base.singlestep_enabled)) {
            gen_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
    log_target_disas(cs, s->base.pc_first, s->base.tb->size);
}

static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start           = openrisc_tr_tb_start,
    .insn_start         = openrisc_tr_insn_start,
    .breakpoint_check   = openrisc_tr_breakpoint_check,
    .translate_insn     = openrisc_tr_translate_insn,
    .tb_stop            = openrisc_tr_tb_stop,
    .disas_log          = openrisc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb);
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf,
                             int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                    (i % 4) == 3 ? '\n' : ' ');
    }
}

void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}