1 /* 2 * OpenRISC translation 3 * 4 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com> 5 * Feng Gao <gf91597@gmail.com> 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 #include "disas/disas.h" 25 #include "tcg-op.h" 26 #include "qemu-common.h" 27 #include "qemu/log.h" 28 #include "qemu/bitops.h" 29 #include "exec/cpu_ldst.h" 30 #include "exec/translator.h" 31 32 #include "exec/helper-proto.h" 33 #include "exec/helper-gen.h" 34 35 #include "trace-tcg.h" 36 #include "exec/log.h" 37 38 #define LOG_DIS(str, ...) 
\ 39 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->base.pc_next, \ 40 ## __VA_ARGS__) 41 42 /* is_jmp field values */ 43 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ 44 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ 45 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */ 46 47 typedef struct DisasContext { 48 DisasContextBase base; 49 uint32_t mem_idx; 50 uint32_t tb_flags; 51 uint32_t delayed_branch; 52 } DisasContext; 53 54 static TCGv cpu_sr; 55 static TCGv cpu_R[32]; 56 static TCGv cpu_R0; 57 static TCGv cpu_pc; 58 static TCGv jmp_pc; /* l.jr/l.jalr temp pc */ 59 static TCGv cpu_ppc; 60 static TCGv cpu_sr_f; /* bf/bnf, F flag taken */ 61 static TCGv cpu_sr_cy; /* carry (unsigned overflow) */ 62 static TCGv cpu_sr_ov; /* signed overflow */ 63 static TCGv cpu_lock_addr; 64 static TCGv cpu_lock_value; 65 static TCGv_i32 fpcsr; 66 static TCGv_i64 cpu_mac; /* MACHI:MACLO */ 67 static TCGv_i32 cpu_dflag; 68 #include "exec/gen-icount.h" 69 70 void openrisc_translate_init(void) 71 { 72 static const char * const regnames[] = { 73 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 74 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 75 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 76 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", 77 }; 78 int i; 79 80 cpu_sr = tcg_global_mem_new(cpu_env, 81 offsetof(CPUOpenRISCState, sr), "sr"); 82 cpu_dflag = tcg_global_mem_new_i32(cpu_env, 83 offsetof(CPUOpenRISCState, dflag), 84 "dflag"); 85 cpu_pc = tcg_global_mem_new(cpu_env, 86 offsetof(CPUOpenRISCState, pc), "pc"); 87 cpu_ppc = tcg_global_mem_new(cpu_env, 88 offsetof(CPUOpenRISCState, ppc), "ppc"); 89 jmp_pc = tcg_global_mem_new(cpu_env, 90 offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc"); 91 cpu_sr_f = tcg_global_mem_new(cpu_env, 92 offsetof(CPUOpenRISCState, sr_f), "sr_f"); 93 cpu_sr_cy = tcg_global_mem_new(cpu_env, 94 offsetof(CPUOpenRISCState, sr_cy), "sr_cy"); 95 cpu_sr_ov = 
tcg_global_mem_new(cpu_env, 96 offsetof(CPUOpenRISCState, sr_ov), "sr_ov"); 97 cpu_lock_addr = tcg_global_mem_new(cpu_env, 98 offsetof(CPUOpenRISCState, lock_addr), 99 "lock_addr"); 100 cpu_lock_value = tcg_global_mem_new(cpu_env, 101 offsetof(CPUOpenRISCState, lock_value), 102 "lock_value"); 103 fpcsr = tcg_global_mem_new_i32(cpu_env, 104 offsetof(CPUOpenRISCState, fpcsr), 105 "fpcsr"); 106 cpu_mac = tcg_global_mem_new_i64(cpu_env, 107 offsetof(CPUOpenRISCState, mac), 108 "mac"); 109 for (i = 0; i < 32; i++) { 110 cpu_R[i] = tcg_global_mem_new(cpu_env, 111 offsetof(CPUOpenRISCState, 112 shadow_gpr[0][i]), 113 regnames[i]); 114 } 115 cpu_R0 = cpu_R[0]; 116 } 117 118 static void gen_exception(DisasContext *dc, unsigned int excp) 119 { 120 TCGv_i32 tmp = tcg_const_i32(excp); 121 gen_helper_exception(cpu_env, tmp); 122 tcg_temp_free_i32(tmp); 123 } 124 125 static void gen_illegal_exception(DisasContext *dc) 126 { 127 tcg_gen_movi_tl(cpu_pc, dc->base.pc_next); 128 gen_exception(dc, EXCP_ILLEGAL); 129 dc->base.is_jmp = DISAS_NORETURN; 130 } 131 132 /* not used yet, open it when we need or64. */ 133 /*#ifdef TARGET_OPENRISC64 134 static void check_ob64s(DisasContext *dc) 135 { 136 if (!(dc->flags & CPUCFGR_OB64S)) { 137 gen_illegal_exception(dc); 138 } 139 } 140 141 static void check_of64s(DisasContext *dc) 142 { 143 if (!(dc->flags & CPUCFGR_OF64S)) { 144 gen_illegal_exception(dc); 145 } 146 } 147 148 static void check_ov64s(DisasContext *dc) 149 { 150 if (!(dc->flags & CPUCFGR_OV64S)) { 151 gen_illegal_exception(dc); 152 } 153 } 154 #endif*/ 155 156 /* We're about to write to REG. On the off-chance that the user is 157 writing to R0, re-instate the architectural register. 
*/ 158 #define check_r0_write(reg) \ 159 do { \ 160 if (unlikely(reg == 0)) { \ 161 cpu_R[0] = cpu_R0; \ 162 } \ 163 } while (0) 164 165 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest) 166 { 167 if (unlikely(dc->base.singlestep_enabled)) { 168 return false; 169 } 170 171 #ifndef CONFIG_USER_ONLY 172 return (dc->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); 173 #else 174 return true; 175 #endif 176 } 177 178 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) 179 { 180 if (use_goto_tb(dc, dest)) { 181 tcg_gen_movi_tl(cpu_pc, dest); 182 tcg_gen_goto_tb(n); 183 tcg_gen_exit_tb((uintptr_t)dc->base.tb + n); 184 } else { 185 tcg_gen_movi_tl(cpu_pc, dest); 186 if (dc->base.singlestep_enabled) { 187 gen_exception(dc, EXCP_DEBUG); 188 } 189 tcg_gen_exit_tb(0); 190 } 191 } 192 193 static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0) 194 { 195 target_ulong tmp_pc = dc->base.pc_next + n26 * 4; 196 197 switch (op0) { 198 case 0x00: /* l.j */ 199 tcg_gen_movi_tl(jmp_pc, tmp_pc); 200 break; 201 case 0x01: /* l.jal */ 202 tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8); 203 /* Optimize jal being used to load the PC for PIC. */ 204 if (tmp_pc == dc->base.pc_next + 8) { 205 return; 206 } 207 tcg_gen_movi_tl(jmp_pc, tmp_pc); 208 break; 209 case 0x03: /* l.bnf */ 210 case 0x04: /* l.bf */ 211 { 212 TCGv t_next = tcg_const_tl(dc->base.pc_next + 8); 213 TCGv t_true = tcg_const_tl(tmp_pc); 214 TCGv t_zero = tcg_const_tl(0); 215 216 tcg_gen_movcond_tl(op0 == 0x03 ? 
TCG_COND_EQ : TCG_COND_NE, 217 jmp_pc, cpu_sr_f, t_zero, t_true, t_next); 218 219 tcg_temp_free(t_next); 220 tcg_temp_free(t_true); 221 tcg_temp_free(t_zero); 222 } 223 break; 224 case 0x11: /* l.jr */ 225 tcg_gen_mov_tl(jmp_pc, cpu_R[reg]); 226 break; 227 case 0x12: /* l.jalr */ 228 tcg_gen_movi_tl(cpu_R[9], (dc->base.pc_next + 8)); 229 tcg_gen_mov_tl(jmp_pc, cpu_R[reg]); 230 break; 231 default: 232 gen_illegal_exception(dc); 233 break; 234 } 235 236 dc->delayed_branch = 2; 237 } 238 239 static void gen_ove_cy(DisasContext *dc) 240 { 241 if (dc->tb_flags & SR_OVE) { 242 gen_helper_ove_cy(cpu_env); 243 } 244 } 245 246 static void gen_ove_ov(DisasContext *dc) 247 { 248 if (dc->tb_flags & SR_OVE) { 249 gen_helper_ove_ov(cpu_env); 250 } 251 } 252 253 static void gen_ove_cyov(DisasContext *dc) 254 { 255 if (dc->tb_flags & SR_OVE) { 256 gen_helper_ove_cyov(cpu_env); 257 } 258 } 259 260 static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 261 { 262 TCGv t0 = tcg_const_tl(0); 263 TCGv res = tcg_temp_new(); 264 265 tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0); 266 tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); 267 tcg_gen_xor_tl(t0, res, srcb); 268 tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); 269 tcg_temp_free(t0); 270 271 tcg_gen_mov_tl(dest, res); 272 tcg_temp_free(res); 273 274 gen_ove_cyov(dc); 275 } 276 277 static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 278 { 279 TCGv t0 = tcg_const_tl(0); 280 TCGv res = tcg_temp_new(); 281 282 tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0); 283 tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0); 284 tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); 285 tcg_gen_xor_tl(t0, res, srcb); 286 tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); 287 tcg_temp_free(t0); 288 289 tcg_gen_mov_tl(dest, res); 290 tcg_temp_free(res); 291 292 gen_ove_cyov(dc); 293 } 294 295 static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 296 { 297 TCGv res = tcg_temp_new(); 298 299 tcg_gen_sub_tl(res, srca, 
srcb); 300 tcg_gen_xor_tl(cpu_sr_cy, srca, srcb); 301 tcg_gen_xor_tl(cpu_sr_ov, res, srcb); 302 tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy); 303 tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb); 304 305 tcg_gen_mov_tl(dest, res); 306 tcg_temp_free(res); 307 308 gen_ove_cyov(dc); 309 } 310 311 static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 312 { 313 TCGv t0 = tcg_temp_new(); 314 315 tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb); 316 tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1); 317 tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0); 318 tcg_temp_free(t0); 319 320 tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); 321 gen_ove_ov(dc); 322 } 323 324 static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 325 { 326 tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb); 327 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0); 328 329 gen_ove_cy(dc); 330 } 331 332 static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 333 { 334 TCGv t0 = tcg_temp_new(); 335 336 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0); 337 /* The result of divide-by-zero is undefined. 338 Supress the host-side exception by dividing by 1. */ 339 tcg_gen_or_tl(t0, srcb, cpu_sr_ov); 340 tcg_gen_div_tl(dest, srca, t0); 341 tcg_temp_free(t0); 342 343 tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); 344 gen_ove_ov(dc); 345 } 346 347 static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 348 { 349 TCGv t0 = tcg_temp_new(); 350 351 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0); 352 /* The result of divide-by-zero is undefined. 353 Supress the host-side exception by dividing by 1. 
*/ 354 tcg_gen_or_tl(t0, srcb, cpu_sr_cy); 355 tcg_gen_divu_tl(dest, srca, t0); 356 tcg_temp_free(t0); 357 358 gen_ove_cy(dc); 359 } 360 361 static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb) 362 { 363 TCGv_i64 t1 = tcg_temp_new_i64(); 364 TCGv_i64 t2 = tcg_temp_new_i64(); 365 366 tcg_gen_ext_tl_i64(t1, srca); 367 tcg_gen_ext_tl_i64(t2, srcb); 368 if (TARGET_LONG_BITS == 32) { 369 tcg_gen_mul_i64(cpu_mac, t1, t2); 370 tcg_gen_movi_tl(cpu_sr_ov, 0); 371 } else { 372 TCGv_i64 high = tcg_temp_new_i64(); 373 374 tcg_gen_muls2_i64(cpu_mac, high, t1, t2); 375 tcg_gen_sari_i64(t1, cpu_mac, 63); 376 tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high); 377 tcg_temp_free_i64(high); 378 tcg_gen_trunc_i64_tl(cpu_sr_ov, t1); 379 tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); 380 381 gen_ove_ov(dc); 382 } 383 tcg_temp_free_i64(t1); 384 tcg_temp_free_i64(t2); 385 } 386 387 static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb) 388 { 389 TCGv_i64 t1 = tcg_temp_new_i64(); 390 TCGv_i64 t2 = tcg_temp_new_i64(); 391 392 tcg_gen_extu_tl_i64(t1, srca); 393 tcg_gen_extu_tl_i64(t2, srcb); 394 if (TARGET_LONG_BITS == 32) { 395 tcg_gen_mul_i64(cpu_mac, t1, t2); 396 tcg_gen_movi_tl(cpu_sr_cy, 0); 397 } else { 398 TCGv_i64 high = tcg_temp_new_i64(); 399 400 tcg_gen_mulu2_i64(cpu_mac, high, t1, t2); 401 tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0); 402 tcg_gen_trunc_i64_tl(cpu_sr_cy, high); 403 tcg_temp_free_i64(high); 404 405 gen_ove_cy(dc); 406 } 407 tcg_temp_free_i64(t1); 408 tcg_temp_free_i64(t2); 409 } 410 411 static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) 412 { 413 TCGv_i64 t1 = tcg_temp_new_i64(); 414 TCGv_i64 t2 = tcg_temp_new_i64(); 415 416 tcg_gen_ext_tl_i64(t1, srca); 417 tcg_gen_ext_tl_i64(t2, srcb); 418 tcg_gen_mul_i64(t1, t1, t2); 419 420 /* Note that overflow is only computed during addition stage. 
*/ 421 tcg_gen_xor_i64(t2, cpu_mac, t1); 422 tcg_gen_add_i64(cpu_mac, cpu_mac, t1); 423 tcg_gen_xor_i64(t1, t1, cpu_mac); 424 tcg_gen_andc_i64(t1, t1, t2); 425 tcg_temp_free_i64(t2); 426 427 #if TARGET_LONG_BITS == 32 428 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); 429 #else 430 tcg_gen_mov_i64(cpu_sr_ov, t1); 431 #endif 432 tcg_temp_free_i64(t1); 433 434 gen_ove_ov(dc); 435 } 436 437 static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb) 438 { 439 TCGv_i64 t1 = tcg_temp_new_i64(); 440 TCGv_i64 t2 = tcg_temp_new_i64(); 441 442 tcg_gen_extu_tl_i64(t1, srca); 443 tcg_gen_extu_tl_i64(t2, srcb); 444 tcg_gen_mul_i64(t1, t1, t2); 445 tcg_temp_free_i64(t2); 446 447 /* Note that overflow is only computed during addition stage. */ 448 tcg_gen_add_i64(cpu_mac, cpu_mac, t1); 449 tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1); 450 tcg_gen_trunc_i64_tl(cpu_sr_cy, t1); 451 tcg_temp_free_i64(t1); 452 453 gen_ove_cy(dc); 454 } 455 456 static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb) 457 { 458 TCGv_i64 t1 = tcg_temp_new_i64(); 459 TCGv_i64 t2 = tcg_temp_new_i64(); 460 461 tcg_gen_ext_tl_i64(t1, srca); 462 tcg_gen_ext_tl_i64(t2, srcb); 463 tcg_gen_mul_i64(t1, t1, t2); 464 465 /* Note that overflow is only computed during subtraction stage. */ 466 tcg_gen_xor_i64(t2, cpu_mac, t1); 467 tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); 468 tcg_gen_xor_i64(t1, t1, cpu_mac); 469 tcg_gen_and_i64(t1, t1, t2); 470 tcg_temp_free_i64(t2); 471 472 #if TARGET_LONG_BITS == 32 473 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); 474 #else 475 tcg_gen_mov_i64(cpu_sr_ov, t1); 476 #endif 477 tcg_temp_free_i64(t1); 478 479 gen_ove_ov(dc); 480 } 481 482 static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb) 483 { 484 TCGv_i64 t1 = tcg_temp_new_i64(); 485 TCGv_i64 t2 = tcg_temp_new_i64(); 486 487 tcg_gen_extu_tl_i64(t1, srca); 488 tcg_gen_extu_tl_i64(t2, srcb); 489 tcg_gen_mul_i64(t1, t1, t2); 490 491 /* Note that overflow is only computed during subtraction stage. 
*/ 492 tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1); 493 tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); 494 tcg_gen_trunc_i64_tl(cpu_sr_cy, t2); 495 tcg_temp_free_i64(t2); 496 tcg_temp_free_i64(t1); 497 498 gen_ove_cy(dc); 499 } 500 501 static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs) 502 { 503 TCGv ea = tcg_temp_new(); 504 505 tcg_gen_addi_tl(ea, ra, ofs); 506 tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL); 507 tcg_gen_mov_tl(cpu_lock_addr, ea); 508 tcg_gen_mov_tl(cpu_lock_value, rd); 509 tcg_temp_free(ea); 510 } 511 512 static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs) 513 { 514 TCGv ea, val; 515 TCGLabel *lab_fail, *lab_done; 516 517 ea = tcg_temp_new(); 518 tcg_gen_addi_tl(ea, ra, ofs); 519 520 /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned 521 to cpu_R[0]. Since l.swa is quite often immediately followed by a 522 branch, don't bother reallocating; finish the TB using the "real" R0. 523 This also takes care of RB input across the branch. 
*/ 524 cpu_R[0] = cpu_R0; 525 526 lab_fail = gen_new_label(); 527 lab_done = gen_new_label(); 528 tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail); 529 tcg_temp_free(ea); 530 531 val = tcg_temp_new(); 532 tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value, 533 cpu_R[b], dc->mem_idx, MO_TEUL); 534 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value); 535 tcg_temp_free(val); 536 537 tcg_gen_br(lab_done); 538 539 gen_set_label(lab_fail); 540 tcg_gen_movi_tl(cpu_sr_f, 0); 541 542 gen_set_label(lab_done); 543 tcg_gen_movi_tl(cpu_lock_addr, -1); 544 } 545 546 static void dec_calc(DisasContext *dc, uint32_t insn) 547 { 548 uint32_t op0, op1, op2; 549 uint32_t ra, rb, rd; 550 op0 = extract32(insn, 0, 4); 551 op1 = extract32(insn, 8, 2); 552 op2 = extract32(insn, 6, 2); 553 ra = extract32(insn, 16, 5); 554 rb = extract32(insn, 11, 5); 555 rd = extract32(insn, 21, 5); 556 557 switch (op1) { 558 case 0: 559 switch (op0) { 560 case 0x0: /* l.add */ 561 LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb); 562 gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 563 return; 564 565 case 0x1: /* l.addc */ 566 LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb); 567 gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 568 return; 569 570 case 0x2: /* l.sub */ 571 LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb); 572 gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 573 return; 574 575 case 0x3: /* l.and */ 576 LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb); 577 tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 578 return; 579 580 case 0x4: /* l.or */ 581 LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb); 582 tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 583 return; 584 585 case 0x5: /* l.xor */ 586 LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb); 587 tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 588 return; 589 590 case 0x8: 591 switch (op2) { 592 case 0: /* l.sll */ 593 LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb); 594 tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 595 return; 596 case 
1: /* l.srl */ 597 LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb); 598 tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 599 return; 600 case 2: /* l.sra */ 601 LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb); 602 tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 603 return; 604 case 3: /* l.ror */ 605 LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb); 606 tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 607 return; 608 } 609 break; 610 611 case 0xc: 612 switch (op2) { 613 case 0: /* l.exths */ 614 LOG_DIS("l.exths r%d, r%d\n", rd, ra); 615 tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]); 616 return; 617 case 1: /* l.extbs */ 618 LOG_DIS("l.extbs r%d, r%d\n", rd, ra); 619 tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]); 620 return; 621 case 2: /* l.exthz */ 622 LOG_DIS("l.exthz r%d, r%d\n", rd, ra); 623 tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]); 624 return; 625 case 3: /* l.extbz */ 626 LOG_DIS("l.extbz r%d, r%d\n", rd, ra); 627 tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]); 628 return; 629 } 630 break; 631 632 case 0xd: 633 switch (op2) { 634 case 0: /* l.extws */ 635 LOG_DIS("l.extws r%d, r%d\n", rd, ra); 636 tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]); 637 return; 638 case 1: /* l.extwz */ 639 LOG_DIS("l.extwz r%d, r%d\n", rd, ra); 640 tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]); 641 return; 642 } 643 break; 644 645 case 0xe: /* l.cmov */ 646 LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb); 647 { 648 TCGv zero = tcg_const_tl(0); 649 tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero, 650 cpu_R[ra], cpu_R[rb]); 651 tcg_temp_free(zero); 652 } 653 return; 654 655 case 0xf: /* l.ff1 */ 656 LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb); 657 tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1); 658 tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1); 659 return; 660 } 661 break; 662 663 case 1: 664 switch (op0) { 665 case 0xf: /* l.fl1 */ 666 LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb); 667 tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS); 668 tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]); 669 return; 670 } 671 
break; 672 673 case 2: 674 break; 675 676 case 3: 677 switch (op0) { 678 case 0x6: /* l.mul */ 679 LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb); 680 gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 681 return; 682 683 case 0x7: /* l.muld */ 684 LOG_DIS("l.muld r%d, r%d\n", ra, rb); 685 gen_muld(dc, cpu_R[ra], cpu_R[rb]); 686 break; 687 688 case 0x9: /* l.div */ 689 LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb); 690 gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 691 return; 692 693 case 0xa: /* l.divu */ 694 LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb); 695 gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 696 return; 697 698 case 0xb: /* l.mulu */ 699 LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb); 700 gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 701 return; 702 703 case 0xc: /* l.muldu */ 704 LOG_DIS("l.muldu r%d, r%d\n", ra, rb); 705 gen_muldu(dc, cpu_R[ra], cpu_R[rb]); 706 return; 707 } 708 break; 709 } 710 gen_illegal_exception(dc); 711 } 712 713 static void dec_misc(DisasContext *dc, uint32_t insn) 714 { 715 uint32_t op0, op1; 716 uint32_t ra, rb, rd; 717 uint32_t L6, K5, K16, K5_11; 718 int32_t I16, I5_11, N26; 719 TCGMemOp mop; 720 TCGv t0; 721 722 op0 = extract32(insn, 26, 6); 723 op1 = extract32(insn, 24, 2); 724 ra = extract32(insn, 16, 5); 725 rb = extract32(insn, 11, 5); 726 rd = extract32(insn, 21, 5); 727 L6 = extract32(insn, 5, 6); 728 K5 = extract32(insn, 0, 5); 729 K16 = extract32(insn, 0, 16); 730 I16 = (int16_t)K16; 731 N26 = sextract32(insn, 0, 26); 732 K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11); 733 I5_11 = (int16_t)K5_11; 734 735 switch (op0) { 736 case 0x00: /* l.j */ 737 LOG_DIS("l.j %d\n", N26); 738 gen_jump(dc, N26, 0, op0); 739 break; 740 741 case 0x01: /* l.jal */ 742 LOG_DIS("l.jal %d\n", N26); 743 gen_jump(dc, N26, 0, op0); 744 break; 745 746 case 0x03: /* l.bnf */ 747 LOG_DIS("l.bnf %d\n", N26); 748 gen_jump(dc, N26, 0, op0); 749 break; 750 751 case 0x04: /* l.bf */ 752 LOG_DIS("l.bf %d\n", N26); 753 gen_jump(dc, N26, 
0, op0); 754 break; 755 756 case 0x05: 757 switch (op1) { 758 case 0x01: /* l.nop */ 759 LOG_DIS("l.nop %d\n", I16); 760 break; 761 762 default: 763 gen_illegal_exception(dc); 764 break; 765 } 766 break; 767 768 case 0x11: /* l.jr */ 769 LOG_DIS("l.jr r%d\n", rb); 770 gen_jump(dc, 0, rb, op0); 771 break; 772 773 case 0x12: /* l.jalr */ 774 LOG_DIS("l.jalr r%d\n", rb); 775 gen_jump(dc, 0, rb, op0); 776 break; 777 778 case 0x13: /* l.maci */ 779 LOG_DIS("l.maci r%d, %d\n", ra, I16); 780 t0 = tcg_const_tl(I16); 781 gen_mac(dc, cpu_R[ra], t0); 782 tcg_temp_free(t0); 783 break; 784 785 case 0x09: /* l.rfe */ 786 LOG_DIS("l.rfe\n"); 787 { 788 #if defined(CONFIG_USER_ONLY) 789 return; 790 #else 791 if (dc->mem_idx == MMU_USER_IDX) { 792 gen_illegal_exception(dc); 793 return; 794 } 795 gen_helper_rfe(cpu_env); 796 dc->base.is_jmp = DISAS_UPDATE; 797 #endif 798 } 799 break; 800 801 case 0x1b: /* l.lwa */ 802 LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16); 803 check_r0_write(rd); 804 gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16); 805 break; 806 807 case 0x1c: /* l.cust1 */ 808 LOG_DIS("l.cust1\n"); 809 break; 810 811 case 0x1d: /* l.cust2 */ 812 LOG_DIS("l.cust2\n"); 813 break; 814 815 case 0x1e: /* l.cust3 */ 816 LOG_DIS("l.cust3\n"); 817 break; 818 819 case 0x1f: /* l.cust4 */ 820 LOG_DIS("l.cust4\n"); 821 break; 822 823 case 0x3c: /* l.cust5 */ 824 LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5); 825 break; 826 827 case 0x3d: /* l.cust6 */ 828 LOG_DIS("l.cust6\n"); 829 break; 830 831 case 0x3e: /* l.cust7 */ 832 LOG_DIS("l.cust7\n"); 833 break; 834 835 case 0x3f: /* l.cust8 */ 836 LOG_DIS("l.cust8\n"); 837 break; 838 839 /* not used yet, open it when we need or64. 
*/ 840 /*#ifdef TARGET_OPENRISC64 841 case 0x20: l.ld 842 LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16); 843 check_ob64s(dc); 844 mop = MO_TEQ; 845 goto do_load; 846 #endif*/ 847 848 case 0x21: /* l.lwz */ 849 LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16); 850 mop = MO_TEUL; 851 goto do_load; 852 853 case 0x22: /* l.lws */ 854 LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16); 855 mop = MO_TESL; 856 goto do_load; 857 858 case 0x23: /* l.lbz */ 859 LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16); 860 mop = MO_UB; 861 goto do_load; 862 863 case 0x24: /* l.lbs */ 864 LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16); 865 mop = MO_SB; 866 goto do_load; 867 868 case 0x25: /* l.lhz */ 869 LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16); 870 mop = MO_TEUW; 871 goto do_load; 872 873 case 0x26: /* l.lhs */ 874 LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16); 875 mop = MO_TESW; 876 goto do_load; 877 878 do_load: 879 check_r0_write(rd); 880 t0 = tcg_temp_new(); 881 tcg_gen_addi_tl(t0, cpu_R[ra], I16); 882 tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop); 883 tcg_temp_free(t0); 884 break; 885 886 case 0x27: /* l.addi */ 887 LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16); 888 check_r0_write(rd); 889 t0 = tcg_const_tl(I16); 890 gen_add(dc, cpu_R[rd], cpu_R[ra], t0); 891 tcg_temp_free(t0); 892 break; 893 894 case 0x28: /* l.addic */ 895 LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16); 896 check_r0_write(rd); 897 t0 = tcg_const_tl(I16); 898 gen_addc(dc, cpu_R[rd], cpu_R[ra], t0); 899 tcg_temp_free(t0); 900 break; 901 902 case 0x29: /* l.andi */ 903 LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16); 904 check_r0_write(rd); 905 tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16); 906 break; 907 908 case 0x2a: /* l.ori */ 909 LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16); 910 check_r0_write(rd); 911 tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16); 912 break; 913 914 case 0x2b: /* l.xori */ 915 LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16); 916 check_r0_write(rd); 917 tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16); 918 break; 
919 920 case 0x2c: /* l.muli */ 921 LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16); 922 check_r0_write(rd); 923 t0 = tcg_const_tl(I16); 924 gen_mul(dc, cpu_R[rd], cpu_R[ra], t0); 925 tcg_temp_free(t0); 926 break; 927 928 case 0x2d: /* l.mfspr */ 929 LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16); 930 check_r0_write(rd); 931 { 932 #if defined(CONFIG_USER_ONLY) 933 return; 934 #else 935 TCGv_i32 ti = tcg_const_i32(K16); 936 if (dc->mem_idx == MMU_USER_IDX) { 937 gen_illegal_exception(dc); 938 return; 939 } 940 gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti); 941 tcg_temp_free_i32(ti); 942 #endif 943 } 944 break; 945 946 case 0x30: /* l.mtspr */ 947 LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11); 948 { 949 #if defined(CONFIG_USER_ONLY) 950 return; 951 #else 952 TCGv_i32 im = tcg_const_i32(K5_11); 953 if (dc->mem_idx == MMU_USER_IDX) { 954 gen_illegal_exception(dc); 955 return; 956 } 957 gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im); 958 tcg_temp_free_i32(im); 959 #endif 960 } 961 break; 962 963 case 0x33: /* l.swa */ 964 LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11); 965 gen_swa(dc, rb, cpu_R[ra], I5_11); 966 break; 967 968 /* not used yet, open it when we need or64. 
*/ 969 /*#ifdef TARGET_OPENRISC64 970 case 0x34: l.sd 971 LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11); 972 check_ob64s(dc); 973 mop = MO_TEQ; 974 goto do_store; 975 #endif*/ 976 977 case 0x35: /* l.sw */ 978 LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11); 979 mop = MO_TEUL; 980 goto do_store; 981 982 case 0x36: /* l.sb */ 983 LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11); 984 mop = MO_UB; 985 goto do_store; 986 987 case 0x37: /* l.sh */ 988 LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11); 989 mop = MO_TEUW; 990 goto do_store; 991 992 do_store: 993 { 994 TCGv t0 = tcg_temp_new(); 995 tcg_gen_addi_tl(t0, cpu_R[ra], I5_11); 996 tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop); 997 tcg_temp_free(t0); 998 } 999 break; 1000 1001 default: 1002 gen_illegal_exception(dc); 1003 break; 1004 } 1005 } 1006 1007 static void dec_mac(DisasContext *dc, uint32_t insn) 1008 { 1009 uint32_t op0; 1010 uint32_t ra, rb; 1011 op0 = extract32(insn, 0, 4); 1012 ra = extract32(insn, 16, 5); 1013 rb = extract32(insn, 11, 5); 1014 1015 switch (op0) { 1016 case 0x0001: /* l.mac */ 1017 LOG_DIS("l.mac r%d, r%d\n", ra, rb); 1018 gen_mac(dc, cpu_R[ra], cpu_R[rb]); 1019 break; 1020 1021 case 0x0002: /* l.msb */ 1022 LOG_DIS("l.msb r%d, r%d\n", ra, rb); 1023 gen_msb(dc, cpu_R[ra], cpu_R[rb]); 1024 break; 1025 1026 case 0x0003: /* l.macu */ 1027 LOG_DIS("l.macu r%d, r%d\n", ra, rb); 1028 gen_macu(dc, cpu_R[ra], cpu_R[rb]); 1029 break; 1030 1031 case 0x0004: /* l.msbu */ 1032 LOG_DIS("l.msbu r%d, r%d\n", ra, rb); 1033 gen_msbu(dc, cpu_R[ra], cpu_R[rb]); 1034 break; 1035 1036 default: 1037 gen_illegal_exception(dc); 1038 break; 1039 } 1040 } 1041 1042 static void dec_logic(DisasContext *dc, uint32_t insn) 1043 { 1044 uint32_t op0; 1045 uint32_t rd, ra, L6, S6; 1046 op0 = extract32(insn, 6, 2); 1047 rd = extract32(insn, 21, 5); 1048 ra = extract32(insn, 16, 5); 1049 L6 = extract32(insn, 0, 6); 1050 S6 = L6 & (TARGET_LONG_BITS - 1); 1051 1052 check_r0_write(rd); 1053 switch (op0) { 1054 case 0x00: /* 
l.slli */ 1055 LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6); 1056 tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6); 1057 break; 1058 1059 case 0x01: /* l.srli */ 1060 LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6); 1061 tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6); 1062 break; 1063 1064 case 0x02: /* l.srai */ 1065 LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6); 1066 tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6); 1067 break; 1068 1069 case 0x03: /* l.rori */ 1070 LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6); 1071 tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6); 1072 break; 1073 1074 default: 1075 gen_illegal_exception(dc); 1076 break; 1077 } 1078 } 1079 1080 static void dec_M(DisasContext *dc, uint32_t insn) 1081 { 1082 uint32_t op0; 1083 uint32_t rd; 1084 uint32_t K16; 1085 op0 = extract32(insn, 16, 1); 1086 rd = extract32(insn, 21, 5); 1087 K16 = extract32(insn, 0, 16); 1088 1089 check_r0_write(rd); 1090 switch (op0) { 1091 case 0x0: /* l.movhi */ 1092 LOG_DIS("l.movhi r%d, %d\n", rd, K16); 1093 tcg_gen_movi_tl(cpu_R[rd], (K16 << 16)); 1094 break; 1095 1096 case 0x1: /* l.macrc */ 1097 LOG_DIS("l.macrc r%d\n", rd); 1098 tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac); 1099 tcg_gen_movi_i64(cpu_mac, 0); 1100 break; 1101 1102 default: 1103 gen_illegal_exception(dc); 1104 break; 1105 } 1106 } 1107 1108 static void dec_comp(DisasContext *dc, uint32_t insn) 1109 { 1110 uint32_t op0; 1111 uint32_t ra, rb; 1112 1113 op0 = extract32(insn, 21, 5); 1114 ra = extract32(insn, 16, 5); 1115 rb = extract32(insn, 11, 5); 1116 1117 /* unsigned integers */ 1118 tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]); 1119 tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]); 1120 1121 switch (op0) { 1122 case 0x0: /* l.sfeq */ 1123 LOG_DIS("l.sfeq r%d, r%d\n", ra, rb); 1124 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1125 break; 1126 1127 case 0x1: /* l.sfne */ 1128 LOG_DIS("l.sfne r%d, r%d\n", ra, rb); 1129 tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1130 break; 1131 1132 case 0x2: /* l.sfgtu */ 
/* NOTE(review): this chunk opens inside dec_comp()'s switch — the function
   header and the earlier cases are above the visible window.  The cases
   below finish the register-register set-flag comparisons, each writing
   a 0/1 result into SR[F] (cpu_sr_f).  */
        LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x3:    /* l.sfgeu */
        LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x4:    /* l.sfltu */
        LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x5:    /* l.sfleu */
        LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xa:    /* l.sfgts */
        LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xb:    /* l.sfges */
        LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xc:    /* l.sflts */
        LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xd:    /* l.sfles */
        LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

/*
 * Decode the set-flag-immediate group (l.sf*i): compare rA against a
 * sign-extended 16-bit immediate and put the boolean result in SR[F].
 * Note the immediate is sign-extended (sextract32) even for the
 * unsigned comparisons, per the instruction encoding.
 */
static void dec_compi(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, ra;
    int32_t I16;

    op0 = extract32(insn, 21, 5);       /* comparison sub-opcode */
    ra = extract32(insn, 16, 5);        /* source register index */
    I16 = sextract32(insn, 0, 16);      /* sign-extended immediate */

    switch (op0) {
    case 0x0:    /* l.sfeqi */
        LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x1:    /* l.sfnei */
        LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x2:    /* l.sfgtui */
        LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x3:    /* l.sfgeui */
        LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x4:    /* l.sfltui */
        LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x5:    /* l.sfleui */
        LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xa:    /* l.sfgtsi */
        LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xb:    /* l.sfgesi */
        LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xc:    /* l.sfltsi */
        LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xd:    /* l.sflesi */
        LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

/*
 * Decode the system group: l.sys and l.trap raise their exceptions
 * (after committing the current pc), the *sync instructions are
 * synchronization barriers.  K16 is decoded for the trace output only;
 * the generated code does not use it here.
 */
static void dec_sys(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t K16;

    op0 = extract32(insn, 16, 10);
    K16 = extract32(insn, 0, 16);

    switch (op0) {
    case 0x000:    /* l.sys */
        LOG_DIS("l.sys %d\n", K16);
        /* Exceptions need an exact pc; store it before raising.  */
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
        gen_exception(dc, EXCP_SYSCALL);
        dc->base.is_jmp = DISAS_NORETURN;
        break;

    case 0x100:    /* l.trap */
        LOG_DIS("l.trap %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
        gen_exception(dc, EXCP_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        break;

    case 0x300:    /* l.csync */
        LOG_DIS("l.csync\n");
        /* No-op at translation level.  */
        break;

    case 0x200:    /* l.msync */
        LOG_DIS("l.msync\n");
        /* Memory barrier: order all prior memory ops before later ones.  */
        tcg_gen_mb(TCG_MO_ALL);
        break;

    case 0x270:    /* l.psync */
        LOG_DIS("l.psync\n");
        /* No-op at translation level.  */
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

/*
 * Decode the single-precision floating point group (lf.*.s).  All the
 * arithmetic goes through out-of-line helpers (which take cpu_env and
 * can thus raise FP exceptions / update FPCSR in the helper — not shown
 * here).  Writes to the destination register are gated by
 * check_r0_write() so r0 stays zero.  The double-precision (or64)
 * decode below is intentionally commented out and unused.
 */
static void dec_float(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 8);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op0) {
    case 0x00:    /* lf.add.s */
        LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x01:    /* lf.sub.s */
        LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x02:    /* lf.mul.s */
        LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x03:    /* lf.div.s */
        LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x04:    /* lf.itof.s */
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x05:    /* lf.ftoi.s */
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x06:    /* lf.rem.s */
        LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x07:    /* lf.madd.s */
        LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        /* Multiply-add accumulates into rd: rd = rd + ra * rb
           (rd is passed as both output and first input).  */
        gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    /* FP comparisons: result goes straight to SR[F], no r0 check needed.  */
    case 0x08:    /* lf.sfeq.s */
        LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
        gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x09:    /* lf.sfne.s */
        LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
        gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0a:    /* lf.sfgt.s */
        LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
        gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0b:    /* lf.sfge.s */
        LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
        gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0c:    /* lf.sflt.s */
        LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
        gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0d:    /* lf.sfle.s */
        LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
        gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    /* not used yet, open it when we need or64.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x10:     lf.add.d
        LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x11:     lf.sub.d
        LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x12:     lf.mul.d
        LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x13:     lf.div.d
        LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x14:     lf.itof.d
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x15:     lf.ftoi.d
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x16:     lf.rem.d
        LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x17:     lf.madd.d
        LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x18:     lf.sfeq.d
        LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1a:     lf.sfgt.d
        LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1b:     lf.sfge.d
        LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x19:     lf.sfne.d
        LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1c:     lf.sflt.d
        LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1d:     lf.sfle.d
        LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;
    #endif*/

    default:
        gen_illegal_exception(dc);
        break;
    }
}

/*
 * Fetch one instruction at pc_next and dispatch on the 6-bit major
 * opcode (bits 31..26) to the per-group decoders.  Anything not listed
 * falls through to dec_misc().
 */
static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
{
    uint32_t op0;
    uint32_t insn;
    insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);
    op0 = extract32(insn, 26, 6);

    switch (op0) {
    case 0x06:
        dec_M(dc, insn);
        break;

    case 0x08:
        dec_sys(dc, insn);
        break;

    case 0x2e:
        dec_logic(dc, insn);
        break;

    case 0x2f:
        dec_compi(dc, insn);
        break;

    case 0x31:
        dec_mac(dc, insn);
        break;

    case 0x32:
        dec_float(dc, insn);
        break;

    case 0x38:
        dec_calc(dc, insn);
        break;

    case 0x39:
        dec_comp(dc, insn);
        break;

    default:
        dec_misc(dc, insn);
        break;
    }
}

/*
 * TranslatorOps hook: set up the per-TB decode state.  delayed_branch
 * is seeded from the DFLAG bit of the TB flags (i.e. whether the TB
 * starts inside a delay slot).  max_insns is clamped so the TB cannot
 * cross a page boundary: -(pc | TARGET_PAGE_MASK) is the number of
 * bytes left on the current page, divided by the 4-byte insn size.
 */
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cs->env_ptr;
    int bound;

    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    DisasContext *dc = container_of(db, DisasContext, base);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case. */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    } else {
        cpu_R[0] = cpu_R0;
    }
}

/*
 * Record pc plus a 2-bit auxiliary word per insn: bit 0 = currently in
 * a delay slot, bit 1 = not the first insn of the TB.  Consumed by
 * restore_state_to_opc() below, so the two must stay in sync.
 */
static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
}

static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                         const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    dc->base.pc_next += 4;
    return true;
}

/*
 * Translate one instruction, then handle delay-slot completion: when
 * the counter set by a branch reaches zero, commit the saved target
 * (jmp_pc) to pc and end the TB via the indirect-jump path.
 */
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);

    disas_openrisc_insn(dc, cpu);
    dc->base.pc_next += 4;

    /* delay slot */
    if (dc->delayed_branch) {
        dc->delayed_branch--;
        if (!dc->delayed_branch) {
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            dc->base.is_jmp = DISAS_UPDATE;
            return;
        }
    }
}

/*
 * Finish the TB: sync the dflag global if the delay-slot state changed
 * relative to the TB entry flags, record ppc, and emit the appropriate
 * TB exit for the is_jmp disposition (goto_tb chaining for the
 * fall-through case, exit_tb for dynamic targets, nothing for paths
 * that already left via exception or jump).
 */
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    tcg_gen_movi_tl(cpu_ppc, dc->base.pc_next - 4);
    if (dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    }
    if (unlikely(dc->base.singlestep_enabled)) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 0, dc->base.pc_next);
            break;
        case DISAS_NORETURN:
        case DISAS_JUMP:
        case DISAS_TB_JUMP:
            break;
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
    log_target_disas(cs, s->base.pc_first, s->base.tb->size);
}

/* Hook table consumed by the generic translator_loop().  */
static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start           = openrisc_tr_tb_start,
    .insn_start         = openrisc_tr_insn_start,
    .breakpoint_check   = openrisc_tr_breakpoint_check,
    .translate_insn     = openrisc_tr_translate_insn,
    .tb_stop            = openrisc_tr_tb_stop,
    .disas_log          = openrisc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb);
}

/* Dump PC and the 32 GPRs, four registers per output line.  */
void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf,
                             int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                    (i % 4) == 3 ? '\n' : ' ');
    }
}

/*
 * Rebuild CPU state from the insn_start data recorded above:
 * data[0] is the pc, data[1] bit 0 is the delay-slot flag, and bit 1
 * ("not first insn") tells us the previous insn is in this TB, so ppc
 * can be recovered as pc - 4.
 */
void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}