1 /* 2 * OpenRISC translation 3 * 4 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com> 5 * Feng Gao <gf91597@gmail.com> 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 #include "disas/disas.h" 25 #include "tcg-op.h" 26 #include "qemu-common.h" 27 #include "qemu/log.h" 28 #include "qemu/bitops.h" 29 #include "exec/cpu_ldst.h" 30 #include "exec/translator.h" 31 32 #include "exec/helper-proto.h" 33 #include "exec/helper-gen.h" 34 35 #include "trace-tcg.h" 36 #include "exec/log.h" 37 38 #define LOG_DIS(str, ...) \ 39 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->pc, ## __VA_ARGS__) 40 41 /* is_jmp field values */ 42 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ 43 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ 44 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */ 45 46 typedef struct DisasContext { 47 TranslationBlock *tb; 48 target_ulong pc; 49 uint32_t is_jmp; 50 uint32_t mem_idx; 51 uint32_t tb_flags; 52 uint32_t delayed_branch; 53 bool singlestep_enabled; 54 } DisasContext; 55 56 static TCGv_env cpu_env; 57 static TCGv cpu_sr; 58 static TCGv cpu_R[32]; 59 static TCGv cpu_R0; 60 static TCGv cpu_pc; 61 static TCGv jmp_pc; /* l.jr/l.jalr temp pc */ 62 static TCGv cpu_ppc; 63 static TCGv cpu_sr_f; /* bf/bnf, F flag taken */ 64 static TCGv cpu_sr_cy; /* carry (unsigned overflow) */ 65 static TCGv cpu_sr_ov; /* signed overflow */ 66 static TCGv cpu_lock_addr; 67 static TCGv cpu_lock_value; 68 static TCGv_i32 fpcsr; 69 static TCGv_i64 cpu_mac; /* MACHI:MACLO */ 70 static TCGv_i32 cpu_dflag; 71 #include "exec/gen-icount.h" 72 73 void openrisc_translate_init(void) 74 { 75 static const char * const regnames[] = { 76 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 78 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 79 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", 80 }; 81 int i; 82 83 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); 84 tcg_ctx->tcg_env = cpu_env; 85 cpu_sr = tcg_global_mem_new(cpu_env, 86 offsetof(CPUOpenRISCState, sr), "sr"); 87 cpu_dflag = tcg_global_mem_new_i32(cpu_env, 88 offsetof(CPUOpenRISCState, dflag), 89 "dflag"); 90 cpu_pc = tcg_global_mem_new(cpu_env, 91 offsetof(CPUOpenRISCState, pc), "pc"); 92 cpu_ppc = tcg_global_mem_new(cpu_env, 93 offsetof(CPUOpenRISCState, ppc), "ppc"); 94 jmp_pc = tcg_global_mem_new(cpu_env, 95 offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc"); 96 cpu_sr_f = tcg_global_mem_new(cpu_env, 97 offsetof(CPUOpenRISCState, sr_f), "sr_f"); 98 cpu_sr_cy = tcg_global_mem_new(cpu_env, 99 offsetof(CPUOpenRISCState, sr_cy), "sr_cy"); 100 cpu_sr_ov = tcg_global_mem_new(cpu_env, 101 offsetof(CPUOpenRISCState, sr_ov), "sr_ov"); 102 cpu_lock_addr = tcg_global_mem_new(cpu_env, 103 offsetof(CPUOpenRISCState, lock_addr), 104 "lock_addr"); 105 cpu_lock_value = tcg_global_mem_new(cpu_env, 106 offsetof(CPUOpenRISCState, lock_value), 107 "lock_value"); 108 fpcsr = tcg_global_mem_new_i32(cpu_env, 109 offsetof(CPUOpenRISCState, fpcsr), 110 "fpcsr"); 111 cpu_mac = tcg_global_mem_new_i64(cpu_env, 112 offsetof(CPUOpenRISCState, mac), 113 "mac"); 114 for (i = 0; i < 32; i++) { 115 cpu_R[i] = tcg_global_mem_new(cpu_env, 116 offsetof(CPUOpenRISCState, 117 shadow_gpr[0][i]), 118 regnames[i]); 119 } 120 cpu_R0 = cpu_R[0]; 121 } 122 123 static void gen_exception(DisasContext *dc, unsigned int excp) 124 { 125 TCGv_i32 tmp = tcg_const_i32(excp); 126 gen_helper_exception(cpu_env, tmp); 127 tcg_temp_free_i32(tmp); 128 } 129 130 static void gen_illegal_exception(DisasContext *dc) 131 { 132 tcg_gen_movi_tl(cpu_pc, dc->pc); 133 gen_exception(dc, EXCP_ILLEGAL); 134 dc->is_jmp = DISAS_UPDATE; 135 } 136 137 /* not used yet, open it when we need or64. */ 138 /*#ifdef TARGET_OPENRISC64 139 static void check_ob64s(DisasContext *dc) 140 { 141 if (!(dc->flags & CPUCFGR_OB64S)) { 142 gen_illegal_exception(dc); 143 } 144 } 145 146 static void check_of64s(DisasContext *dc) 147 { 148 if (!(dc->flags & CPUCFGR_OF64S)) { 149 gen_illegal_exception(dc); 150 } 151 } 152 153 static void check_ov64s(DisasContext *dc) 154 { 155 if (!(dc->flags & CPUCFGR_OV64S)) { 156 gen_illegal_exception(dc); 157 } 158 } 159 #endif*/ 160 161 /* We're about to write to REG. On the off-chance that the user is 162 writing to R0, re-instate the architectural register. */ 163 #define check_r0_write(reg) \ 164 do { \ 165 if (unlikely(reg == 0)) { \ 166 cpu_R[0] = cpu_R0; \ 167 } \ 168 } while (0) 169 170 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest) 171 { 172 if (unlikely(dc->singlestep_enabled)) { 173 return false; 174 } 175 176 #ifndef CONFIG_USER_ONLY 177 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); 178 #else 179 return true; 180 #endif 181 } 182 183 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) 184 { 185 if (use_goto_tb(dc, dest)) { 186 tcg_gen_movi_tl(cpu_pc, dest); 187 tcg_gen_goto_tb(n); 188 tcg_gen_exit_tb((uintptr_t)dc->tb + n); 189 } else { 190 tcg_gen_movi_tl(cpu_pc, dest); 191 if (dc->singlestep_enabled) { 192 gen_exception(dc, EXCP_DEBUG); 193 } 194 tcg_gen_exit_tb(0); 195 } 196 } 197 198 static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0) 199 { 200 target_ulong tmp_pc = dc->pc + n26 * 4; 201 202 switch (op0) { 203 case 0x00: /* l.j */ 204 tcg_gen_movi_tl(jmp_pc, tmp_pc); 205 break; 206 case 0x01: /* l.jal */ 207 tcg_gen_movi_tl(cpu_R[9], dc->pc + 8); 208 /* Optimize jal being used to load the PC for PIC. */ 209 if (tmp_pc == dc->pc + 8) { 210 return; 211 } 212 tcg_gen_movi_tl(jmp_pc, tmp_pc); 213 break; 214 case 0x03: /* l.bnf */ 215 case 0x04: /* l.bf */ 216 { 217 TCGv t_next = tcg_const_tl(dc->pc + 8); 218 TCGv t_true = tcg_const_tl(tmp_pc); 219 TCGv t_zero = tcg_const_tl(0); 220 221 tcg_gen_movcond_tl(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE, 222 jmp_pc, cpu_sr_f, t_zero, t_true, t_next); 223 224 tcg_temp_free(t_next); 225 tcg_temp_free(t_true); 226 tcg_temp_free(t_zero); 227 } 228 break; 229 case 0x11: /* l.jr */ 230 tcg_gen_mov_tl(jmp_pc, cpu_R[reg]); 231 break; 232 case 0x12: /* l.jalr */ 233 tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8)); 234 tcg_gen_mov_tl(jmp_pc, cpu_R[reg]); 235 break; 236 default: 237 gen_illegal_exception(dc); 238 break; 239 } 240 241 dc->delayed_branch = 2; 242 } 243 244 static void gen_ove_cy(DisasContext *dc) 245 { 246 if (dc->tb_flags & SR_OVE) { 247 gen_helper_ove_cy(cpu_env); 248 } 249 } 250 251 static void gen_ove_ov(DisasContext *dc) 252 { 253 if (dc->tb_flags & SR_OVE) { 254 gen_helper_ove_ov(cpu_env); 255 } 256 } 257 258 static void gen_ove_cyov(DisasContext *dc) 259 { 260 if (dc->tb_flags & SR_OVE) { 261 gen_helper_ove_cyov(cpu_env); 262 } 263 } 264 265 static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 266 { 267 TCGv t0 = tcg_const_tl(0); 268 TCGv res = tcg_temp_new(); 269 270 tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0); 271 tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); 272 tcg_gen_xor_tl(t0, res, srcb); 273 tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); 274 tcg_temp_free(t0); 275 276 tcg_gen_mov_tl(dest, res); 277 tcg_temp_free(res); 278 279 gen_ove_cyov(dc); 280 } 281 282 static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 283 { 284 TCGv t0 = tcg_const_tl(0); 285 TCGv res = tcg_temp_new(); 286 287 tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0); 288 tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0); 289 tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); 290 tcg_gen_xor_tl(t0, res, srcb); 291 tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); 292 tcg_temp_free(t0); 293 294 tcg_gen_mov_tl(dest, res); 295 tcg_temp_free(res); 296 297 gen_ove_cyov(dc); 298 } 299 300 static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 301 { 302 TCGv res = tcg_temp_new(); 303 304 tcg_gen_sub_tl(res, srca, srcb); 305 tcg_gen_xor_tl(cpu_sr_cy, srca, srcb); 306 tcg_gen_xor_tl(cpu_sr_ov, res, srcb); 307 tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy); 308 tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb); 309 310 tcg_gen_mov_tl(dest, res); 311 tcg_temp_free(res); 312 313 gen_ove_cyov(dc); 314 } 315 316 static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 317 { 318 TCGv t0 = tcg_temp_new(); 319 320 tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb); 321 tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1); 322 tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0); 323 tcg_temp_free(t0); 324 325 tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); 326 gen_ove_ov(dc); 327 } 328 329 static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 330 { 331 tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb); 332 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0); 333 334 gen_ove_cy(dc); 335 } 336 337 static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 338 { 339 TCGv t0 = tcg_temp_new(); 340 341 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0); 342 /* The result of divide-by-zero is undefined. 343 Supress the host-side exception by dividing by 1. */ 344 tcg_gen_or_tl(t0, srcb, cpu_sr_ov); 345 tcg_gen_div_tl(dest, srca, t0); 346 tcg_temp_free(t0); 347 348 tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); 349 gen_ove_ov(dc); 350 } 351 352 static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) 353 { 354 TCGv t0 = tcg_temp_new(); 355 356 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0); 357 /* The result of divide-by-zero is undefined. 358 Supress the host-side exception by dividing by 1. */ 359 tcg_gen_or_tl(t0, srcb, cpu_sr_cy); 360 tcg_gen_divu_tl(dest, srca, t0); 361 tcg_temp_free(t0); 362 363 gen_ove_cy(dc); 364 } 365 366 static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb) 367 { 368 TCGv_i64 t1 = tcg_temp_new_i64(); 369 TCGv_i64 t2 = tcg_temp_new_i64(); 370 371 tcg_gen_ext_tl_i64(t1, srca); 372 tcg_gen_ext_tl_i64(t2, srcb); 373 if (TARGET_LONG_BITS == 32) { 374 tcg_gen_mul_i64(cpu_mac, t1, t2); 375 tcg_gen_movi_tl(cpu_sr_ov, 0); 376 } else { 377 TCGv_i64 high = tcg_temp_new_i64(); 378 379 tcg_gen_muls2_i64(cpu_mac, high, t1, t2); 380 tcg_gen_sari_i64(t1, cpu_mac, 63); 381 tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high); 382 tcg_temp_free_i64(high); 383 tcg_gen_trunc_i64_tl(cpu_sr_ov, t1); 384 tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); 385 386 gen_ove_ov(dc); 387 } 388 tcg_temp_free_i64(t1); 389 tcg_temp_free_i64(t2); 390 } 391 392 static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb) 393 { 394 TCGv_i64 t1 = tcg_temp_new_i64(); 395 TCGv_i64 t2 = tcg_temp_new_i64(); 396 397 tcg_gen_extu_tl_i64(t1, srca); 398 tcg_gen_extu_tl_i64(t2, srcb); 399 if (TARGET_LONG_BITS == 32) { 400 tcg_gen_mul_i64(cpu_mac, t1, t2); 401 tcg_gen_movi_tl(cpu_sr_cy, 0); 402 } else { 403 TCGv_i64 high = tcg_temp_new_i64(); 404 405 tcg_gen_mulu2_i64(cpu_mac, high, t1, t2); 406 tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0); 407 tcg_gen_trunc_i64_tl(cpu_sr_cy, high); 408 tcg_temp_free_i64(high); 409 410 gen_ove_cy(dc); 411 } 412 tcg_temp_free_i64(t1); 413 tcg_temp_free_i64(t2); 414 } 415 416 static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) 417 { 418 TCGv_i64 t1 = tcg_temp_new_i64(); 419 TCGv_i64 t2 = tcg_temp_new_i64(); 420 421 tcg_gen_ext_tl_i64(t1, srca); 422 tcg_gen_ext_tl_i64(t2, srcb); 423 tcg_gen_mul_i64(t1, t1, t2); 424 425 /* Note that overflow is only computed during addition stage. */ 426 tcg_gen_xor_i64(t2, cpu_mac, t1); 427 tcg_gen_add_i64(cpu_mac, cpu_mac, t1); 428 tcg_gen_xor_i64(t1, t1, cpu_mac); 429 tcg_gen_andc_i64(t1, t1, t2); 430 tcg_temp_free_i64(t2); 431 432 #if TARGET_LONG_BITS == 32 433 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); 434 #else 435 tcg_gen_mov_i64(cpu_sr_ov, t1); 436 #endif 437 tcg_temp_free_i64(t1); 438 439 gen_ove_ov(dc); 440 } 441 442 static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb) 443 { 444 TCGv_i64 t1 = tcg_temp_new_i64(); 445 TCGv_i64 t2 = tcg_temp_new_i64(); 446 447 tcg_gen_extu_tl_i64(t1, srca); 448 tcg_gen_extu_tl_i64(t2, srcb); 449 tcg_gen_mul_i64(t1, t1, t2); 450 tcg_temp_free_i64(t2); 451 452 /* Note that overflow is only computed during addition stage. */ 453 tcg_gen_add_i64(cpu_mac, cpu_mac, t1); 454 tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1); 455 tcg_gen_trunc_i64_tl(cpu_sr_cy, t1); 456 tcg_temp_free_i64(t1); 457 458 gen_ove_cy(dc); 459 } 460 461 static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb) 462 { 463 TCGv_i64 t1 = tcg_temp_new_i64(); 464 TCGv_i64 t2 = tcg_temp_new_i64(); 465 466 tcg_gen_ext_tl_i64(t1, srca); 467 tcg_gen_ext_tl_i64(t2, srcb); 468 tcg_gen_mul_i64(t1, t1, t2); 469 470 /* Note that overflow is only computed during subtraction stage. */ 471 tcg_gen_xor_i64(t2, cpu_mac, t1); 472 tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); 473 tcg_gen_xor_i64(t1, t1, cpu_mac); 474 tcg_gen_and_i64(t1, t1, t2); 475 tcg_temp_free_i64(t2); 476 477 #if TARGET_LONG_BITS == 32 478 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); 479 #else 480 tcg_gen_mov_i64(cpu_sr_ov, t1); 481 #endif 482 tcg_temp_free_i64(t1); 483 484 gen_ove_ov(dc); 485 } 486 487 static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb) 488 { 489 TCGv_i64 t1 = tcg_temp_new_i64(); 490 TCGv_i64 t2 = tcg_temp_new_i64(); 491 492 tcg_gen_extu_tl_i64(t1, srca); 493 tcg_gen_extu_tl_i64(t2, srcb); 494 tcg_gen_mul_i64(t1, t1, t2); 495 496 /* Note that overflow is only computed during subtraction stage. */ 497 tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1); 498 tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); 499 tcg_gen_trunc_i64_tl(cpu_sr_cy, t2); 500 tcg_temp_free_i64(t2); 501 tcg_temp_free_i64(t1); 502 503 gen_ove_cy(dc); 504 } 505 506 static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs) 507 { 508 TCGv ea = tcg_temp_new(); 509 510 tcg_gen_addi_tl(ea, ra, ofs); 511 tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL); 512 tcg_gen_mov_tl(cpu_lock_addr, ea); 513 tcg_gen_mov_tl(cpu_lock_value, rd); 514 tcg_temp_free(ea); 515 } 516 517 static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs) 518 { 519 TCGv ea, val; 520 TCGLabel *lab_fail, *lab_done; 521 522 ea = tcg_temp_new(); 523 tcg_gen_addi_tl(ea, ra, ofs); 524 525 /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned 526 to cpu_R[0]. Since l.swa is quite often immediately followed by a 527 branch, don't bother reallocating; finish the TB using the "real" R0. 528 This also takes care of RB input across the branch. */ 529 cpu_R[0] = cpu_R0; 530 531 lab_fail = gen_new_label(); 532 lab_done = gen_new_label(); 533 tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail); 534 tcg_temp_free(ea); 535 536 val = tcg_temp_new(); 537 tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value, 538 cpu_R[b], dc->mem_idx, MO_TEUL); 539 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value); 540 tcg_temp_free(val); 541 542 tcg_gen_br(lab_done); 543 544 gen_set_label(lab_fail); 545 tcg_gen_movi_tl(cpu_sr_f, 0); 546 547 gen_set_label(lab_done); 548 tcg_gen_movi_tl(cpu_lock_addr, -1); 549 } 550 551 static void dec_calc(DisasContext *dc, uint32_t insn) 552 { 553 uint32_t op0, op1, op2; 554 uint32_t ra, rb, rd; 555 op0 = extract32(insn, 0, 4); 556 op1 = extract32(insn, 8, 2); 557 op2 = extract32(insn, 6, 2); 558 ra = extract32(insn, 16, 5); 559 rb = extract32(insn, 11, 5); 560 rd = extract32(insn, 21, 5); 561 562 switch (op1) { 563 case 0: 564 switch (op0) { 565 case 0x0: /* l.add */ 566 LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb); 567 gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 568 return; 569 570 case 0x1: /* l.addc */ 571 LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb); 572 gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 573 return; 574 575 case 0x2: /* l.sub */ 576 LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb); 577 gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 578 return; 579 580 case 0x3: /* l.and */ 581 LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb); 582 tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 583 return; 584 585 case 0x4: /* l.or */ 586 LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb); 587 tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 588 return; 589 590 case 0x5: /* l.xor */ 591 LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb); 592 tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 593 return; 594 595 case 0x8: 596 switch (op2) { 597 case 0: /* l.sll */ 598 LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb); 599 tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 600 return; 601 case 1: /* l.srl */ 602 LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb); 603 tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 604 return; 605 case 2: /* l.sra */ 606 LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb); 607 tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 608 return; 609 case 3: /* l.ror */ 610 LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb); 611 tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); 612 return; 613 } 614 break; 615 616 case 0xc: 617 switch (op2) { 618 case 0: /* l.exths */ 619 LOG_DIS("l.exths r%d, r%d\n", rd, ra); 620 tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]); 621 return; 622 case 1: /* l.extbs */ 623 LOG_DIS("l.extbs r%d, r%d\n", rd, ra); 624 tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]); 625 return; 626 case 2: /* l.exthz */ 627 LOG_DIS("l.exthz r%d, r%d\n", rd, ra); 628 tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]); 629 return; 630 case 3: /* l.extbz */ 631 LOG_DIS("l.extbz r%d, r%d\n", rd, ra); 632 tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]); 633 return; 634 } 635 break; 636 637 case 0xd: 638 switch (op2) { 639 case 0: /* l.extws */ 640 LOG_DIS("l.extws r%d, r%d\n", rd, ra); 641 tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]); 642 return; 643 case 1: /* l.extwz */ 644 LOG_DIS("l.extwz r%d, r%d\n", rd, ra); 645 tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]); 646 return; 647 } 648 break; 649 650 case 0xe: /* l.cmov */ 651 LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb); 652 { 653 TCGv zero = tcg_const_tl(0); 654 tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero, 655 cpu_R[ra], cpu_R[rb]); 656 tcg_temp_free(zero); 657 } 658 return; 659 660 case 0xf: /* l.ff1 */ 661 LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb); 662 tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1); 663 tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1); 664 return; 665 } 666 break; 667 668 case 1: 669 switch (op0) { 670 case 0xf: /* l.fl1 */ 671 LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb); 672 tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS); 673 tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]); 674 return; 675 } 676 break; 677 678 case 2: 679 break; 680 681 case 3: 682 switch (op0) { 683 case 0x6: /* l.mul */ 684 LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb); 685 gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 686 return; 687 688 case 0x7: /* l.muld */ 689 LOG_DIS("l.muld r%d, r%d\n", ra, rb); 690 gen_muld(dc, cpu_R[ra], cpu_R[rb]); 691 break; 692 693 case 0x9: /* l.div */ 694 LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb); 695 gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 696 return; 697 698 case 0xa: /* l.divu */ 699 LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb); 700 gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 701 return; 702 703 case 0xb: /* l.mulu */ 704 LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb); 705 gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]); 706 return; 707 708 case 0xc: /* l.muldu */ 709 LOG_DIS("l.muldu r%d, r%d\n", ra, rb); 710 gen_muldu(dc, cpu_R[ra], cpu_R[rb]); 711 return; 712 } 713 break; 714 } 715 gen_illegal_exception(dc); 716 } 717 718 static void dec_misc(DisasContext *dc, uint32_t insn) 719 { 720 uint32_t op0, op1; 721 uint32_t ra, rb, rd; 722 uint32_t L6, K5, K16, K5_11; 723 int32_t I16, I5_11, N26; 724 TCGMemOp mop; 725 TCGv t0; 726 727 op0 = extract32(insn, 26, 6); 728 op1 = extract32(insn, 24, 2); 729 ra = extract32(insn, 16, 5); 730 rb = extract32(insn, 11, 5); 731 rd = extract32(insn, 21, 5); 732 L6 = extract32(insn, 5, 6); 733 K5 = extract32(insn, 0, 5); 734 K16 = extract32(insn, 0, 16); 735 I16 = (int16_t)K16; 736 N26 = sextract32(insn, 0, 26); 737 K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11); 738 I5_11 = (int16_t)K5_11; 739 740 switch (op0) { 741 case 0x00: /* l.j */ 742 LOG_DIS("l.j %d\n", N26); 743 gen_jump(dc, N26, 0, op0); 744 break; 745 746 case 0x01: /* l.jal */ 747 LOG_DIS("l.jal %d\n", N26); 748 gen_jump(dc, N26, 0, op0); 749 break; 750 751 case 0x03: /* l.bnf */ 752 LOG_DIS("l.bnf %d\n", N26); 753 gen_jump(dc, N26, 0, op0); 754 break; 755 756 case 0x04: /* l.bf */ 757 LOG_DIS("l.bf %d\n", N26); 758 gen_jump(dc, N26, 0, op0); 759 break; 760 761 case 0x05: 762 switch (op1) { 763 case 0x01: /* l.nop */ 764 LOG_DIS("l.nop %d\n", I16); 765 break; 766 767 default: 768 gen_illegal_exception(dc); 769 break; 770 } 771 break; 772 773 case 0x11: /* l.jr */ 774 LOG_DIS("l.jr r%d\n", rb); 775 gen_jump(dc, 0, rb, op0); 776 break; 777 778 case 0x12: /* l.jalr */ 779 LOG_DIS("l.jalr r%d\n", rb); 780 gen_jump(dc, 0, rb, op0); 781 break; 782 783 case 0x13: /* l.maci */ 784 LOG_DIS("l.maci r%d, %d\n", ra, I16); 785 t0 = tcg_const_tl(I16); 786 gen_mac(dc, cpu_R[ra], t0); 787 tcg_temp_free(t0); 788 break; 789 790 case 0x09: /* l.rfe */ 791 LOG_DIS("l.rfe\n"); 792 { 793 #if defined(CONFIG_USER_ONLY) 794 return; 795 #else 796 if (dc->mem_idx == MMU_USER_IDX) { 797 gen_illegal_exception(dc); 798 return; 799 } 800 gen_helper_rfe(cpu_env); 801 dc->is_jmp = DISAS_UPDATE; 802 #endif 803 } 804 break; 805 806 case 0x1b: /* l.lwa */ 807 LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16); 808 check_r0_write(rd); 809 gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16); 810 break; 811 812 case 0x1c: /* l.cust1 */ 813 LOG_DIS("l.cust1\n"); 814 break; 815 816 case 0x1d: /* l.cust2 */ 817 LOG_DIS("l.cust2\n"); 818 break; 819 820 case 0x1e: /* l.cust3 */ 821 LOG_DIS("l.cust3\n"); 822 break; 823 824 case 0x1f: /* l.cust4 */ 825 LOG_DIS("l.cust4\n"); 826 break; 827 828 case 0x3c: /* l.cust5 */ 829 LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5); 830 break; 831 832 case 0x3d: /* l.cust6 */ 833 LOG_DIS("l.cust6\n"); 834 break; 835 836 case 0x3e: /* l.cust7 */ 837 LOG_DIS("l.cust7\n"); 838 break; 839 840 case 0x3f: /* l.cust8 */ 841 LOG_DIS("l.cust8\n"); 842 break; 843 844 /* not used yet, open it when we need or64. */ 845 /*#ifdef TARGET_OPENRISC64 846 case 0x20: l.ld 847 LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16); 848 check_ob64s(dc); 849 mop = MO_TEQ; 850 goto do_load; 851 #endif*/ 852 853 case 0x21: /* l.lwz */ 854 LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16); 855 mop = MO_TEUL; 856 goto do_load; 857 858 case 0x22: /* l.lws */ 859 LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16); 860 mop = MO_TESL; 861 goto do_load; 862 863 case 0x23: /* l.lbz */ 864 LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16); 865 mop = MO_UB; 866 goto do_load; 867 868 case 0x24: /* l.lbs */ 869 LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16); 870 mop = MO_SB; 871 goto do_load; 872 873 case 0x25: /* l.lhz */ 874 LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16); 875 mop = MO_TEUW; 876 goto do_load; 877 878 case 0x26: /* l.lhs */ 879 LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16); 880 mop = MO_TESW; 881 goto do_load; 882 883 do_load: 884 check_r0_write(rd); 885 t0 = tcg_temp_new(); 886 tcg_gen_addi_tl(t0, cpu_R[ra], I16); 887 tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop); 888 tcg_temp_free(t0); 889 break; 890 891 case 0x27: /* l.addi */ 892 LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16); 893 check_r0_write(rd); 894 t0 = tcg_const_tl(I16); 895 gen_add(dc, cpu_R[rd], cpu_R[ra], t0); 896 tcg_temp_free(t0); 897 break; 898 899 case 0x28: /* l.addic */ 900 LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16); 901 check_r0_write(rd); 902 t0 = tcg_const_tl(I16); 903 gen_addc(dc, cpu_R[rd], cpu_R[ra], t0); 904 tcg_temp_free(t0); 905 break; 906 907 case 0x29: /* l.andi */ 908 LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16); 909 check_r0_write(rd); 910 tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16); 911 break; 912 913 case 0x2a: /* l.ori */ 914 LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16); 915 check_r0_write(rd); 916 tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16); 917 break; 918 919 case 0x2b: /* l.xori */ 920 LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16); 921 check_r0_write(rd); 922 tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16); 923 break; 924 925 case 0x2c: /* l.muli */ 926 LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16); 927 check_r0_write(rd); 928 t0 = tcg_const_tl(I16); 929 gen_mul(dc, cpu_R[rd], cpu_R[ra], t0); 930 tcg_temp_free(t0); 931 break; 932 933 case 0x2d: /* l.mfspr */ 934 LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16); 935 check_r0_write(rd); 936 { 937 #if defined(CONFIG_USER_ONLY) 938 return; 939 #else 940 TCGv_i32 ti = tcg_const_i32(K16); 941 if (dc->mem_idx == MMU_USER_IDX) { 942 gen_illegal_exception(dc); 943 return; 944 } 945 gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti); 946 tcg_temp_free_i32(ti); 947 #endif 948 } 949 break; 950 951 case 0x30: /* l.mtspr */ 952 LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11); 953 { 954 #if defined(CONFIG_USER_ONLY) 955 return; 956 #else 957 TCGv_i32 im = tcg_const_i32(K5_11); 958 if (dc->mem_idx == MMU_USER_IDX) { 959 gen_illegal_exception(dc); 960 return; 961 } 962 gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im); 963 tcg_temp_free_i32(im); 964 #endif 965 } 966 break; 967 968 case 0x33: /* l.swa */ 969 LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11); 970 gen_swa(dc, rb, cpu_R[ra], I5_11); 971 break; 972 973 /* not used yet, open it when we need or64. */ 974 /*#ifdef TARGET_OPENRISC64 975 case 0x34: l.sd 976 LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11); 977 check_ob64s(dc); 978 mop = MO_TEQ; 979 goto do_store; 980 #endif*/ 981 982 case 0x35: /* l.sw */ 983 LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11); 984 mop = MO_TEUL; 985 goto do_store; 986 987 case 0x36: /* l.sb */ 988 LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11); 989 mop = MO_UB; 990 goto do_store; 991 992 case 0x37: /* l.sh */ 993 LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11); 994 mop = MO_TEUW; 995 goto do_store; 996 997 do_store: 998 { 999 TCGv t0 = tcg_temp_new(); 1000 tcg_gen_addi_tl(t0, cpu_R[ra], I5_11); 1001 tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop); 1002 tcg_temp_free(t0); 1003 } 1004 break; 1005 1006 default: 1007 gen_illegal_exception(dc); 1008 break; 1009 } 1010 } 1011 1012 static void dec_mac(DisasContext *dc, uint32_t insn) 1013 { 1014 uint32_t op0; 1015 uint32_t ra, rb; 1016 op0 = extract32(insn, 0, 4); 1017 ra = extract32(insn, 16, 5); 1018 rb = extract32(insn, 11, 5); 1019 1020 switch (op0) { 1021 case 0x0001: /* l.mac */ 1022 LOG_DIS("l.mac r%d, r%d\n", ra, rb); 1023 gen_mac(dc, cpu_R[ra], cpu_R[rb]); 1024 break; 1025 1026 case 0x0002: /* l.msb */ 1027 LOG_DIS("l.msb r%d, r%d\n", ra, rb); 1028 gen_msb(dc, cpu_R[ra], cpu_R[rb]); 1029 break; 1030 1031 case 0x0003: /* l.macu */ 1032 LOG_DIS("l.macu r%d, r%d\n", ra, rb); 1033 gen_macu(dc, cpu_R[ra], cpu_R[rb]); 1034 break; 1035 1036 case 0x0004: /* l.msbu */ 1037 LOG_DIS("l.msbu r%d, r%d\n", ra, rb); 1038 gen_msbu(dc, cpu_R[ra], cpu_R[rb]); 1039 break; 1040 1041 default: 1042 gen_illegal_exception(dc); 1043 break; 1044 } 1045 } 1046 1047 static void dec_logic(DisasContext *dc, uint32_t insn) 1048 { 1049 uint32_t op0; 1050 uint32_t rd, ra, L6, S6; 1051 op0 = extract32(insn, 6, 2); 1052 rd = extract32(insn, 21, 5); 1053 ra = extract32(insn, 16, 5); 1054 L6 = extract32(insn, 0, 6); 1055 S6 = L6 & (TARGET_LONG_BITS - 1); 1056 1057 check_r0_write(rd); 1058 switch (op0) { 1059 case 0x00: /* l.slli */ 1060 LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6); 1061 tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6); 1062 break; 1063 1064 case 0x01: /* l.srli */ 1065 LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6); 1066 tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6); 1067 break; 1068 1069 case 0x02: /* l.srai */ 1070 LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6); 1071 tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6); 1072 break; 1073 1074 case 0x03: /* l.rori */ 1075 LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6); 1076 tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6); 1077 break; 1078 1079 default: 1080 gen_illegal_exception(dc); 1081 break; 1082 } 1083 } 1084 1085 static void dec_M(DisasContext *dc, uint32_t insn) 1086 { 1087 uint32_t op0; 1088 uint32_t rd; 1089 uint32_t K16; 1090 op0 = extract32(insn, 16, 1); 1091 rd = extract32(insn, 21, 5); 1092 K16 = extract32(insn, 0, 16); 1093 1094 check_r0_write(rd); 1095 switch (op0) { 1096 case 0x0: /* l.movhi */ 1097 LOG_DIS("l.movhi r%d, %d\n", rd, K16); 1098 tcg_gen_movi_tl(cpu_R[rd], (K16 << 16)); 1099 break; 1100 1101 case 0x1: /* l.macrc */ 1102 LOG_DIS("l.macrc r%d\n", rd); 1103 tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac); 1104 tcg_gen_movi_i64(cpu_mac, 0); 1105 break; 1106 1107 default: 1108 gen_illegal_exception(dc); 1109 break; 1110 } 1111 } 1112 1113 static void dec_comp(DisasContext *dc, uint32_t insn) 1114 { 1115 uint32_t op0; 1116 uint32_t ra, rb; 1117 1118 op0 = extract32(insn, 21, 5); 1119 ra = extract32(insn, 16, 5); 1120 rb = extract32(insn, 11, 5); 1121 1122 /* unsigned integers */ 1123 tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]); 1124 tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]); 1125 1126 switch (op0) { 1127 case 0x0: /* l.sfeq */ 1128 LOG_DIS("l.sfeq r%d, r%d\n", ra, rb); 1129 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1130 break; 1131 1132 case 0x1: /* l.sfne */ 1133 LOG_DIS("l.sfne r%d, r%d\n", ra, rb); 1134 tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1135 break; 1136 1137 case 0x2: /* l.sfgtu */ 1138 LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb); 1139 tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1140 break; 1141 1142 case 0x3: /* l.sfgeu */ 1143 LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb); 1144 tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1145 break; 1146 1147 case 0x4: /* l.sfltu */ 1148 LOG_DIS("l.sfltu r%d, r%d\n", ra, rb); 1149 tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1150 break; 1151 1152 case 0x5: /* l.sfleu */ 1153 LOG_DIS("l.sfleu r%d, r%d\n", ra, rb); 1154 tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1155 break; 1156 1157 case 0xa: /* l.sfgts */ 1158 LOG_DIS("l.sfgts r%d, r%d\n", ra, rb); 1159 tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1160 break; 1161 1162 case 0xb: /* l.sfges */ 1163 LOG_DIS("l.sfges r%d, r%d\n", ra, rb); 1164 tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1165 break; 1166 1167 case 0xc: /* l.sflts */ 1168 LOG_DIS("l.sflts r%d, r%d\n", ra, rb); 1169 tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1170 break; 1171 1172 case 0xd: /* l.sfles */ 1173 LOG_DIS("l.sfles r%d, r%d\n", ra, rb); 1174 tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]); 1175 break; 1176 1177 default: 1178 gen_illegal_exception(dc); 1179 break; 1180 } 1181 } 1182 1183 static void dec_compi(DisasContext *dc, uint32_t insn) 1184 { 1185 uint32_t op0, ra; 1186 int32_t I16; 1187 1188 op0 = extract32(insn, 21, 5); 1189 ra = extract32(insn, 16, 5); 1190 I16 = sextract32(insn, 0, 16); 1191 1192 switch (op0) { 1193 case 0x0: /* l.sfeqi */ 1194 LOG_DIS("l.sfeqi r%d, %d\n", ra, I16); 1195 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16); 1196 break; 1197 1198 case 0x1: /* l.sfnei */ 1199 LOG_DIS("l.sfnei r%d, %d\n", ra, I16); 1200 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16); 1201 break; 1202 1203 case 0x2: /* l.sfgtui */ 1204 LOG_DIS("l.sfgtui r%d, %d\n", ra, I16); 1205 tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16); 1206 break; 1207 1208 case 0x3: /* l.sfgeui */ 1209 LOG_DIS("l.sfgeui r%d, %d\n", ra, I16); 1210 tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16); 1211 break; 1212 1213 case 0x4: /* l.sfltui */ 1214 LOG_DIS("l.sfltui r%d, %d\n", ra, I16); 1215 tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16); 1216 break; 1217 1218 case 0x5: /* l.sfleui */ 1219 LOG_DIS("l.sfleui r%d, %d\n", ra, I16); 1220 tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16); 1221 break; 1222 1223 case 0xa: /* l.sfgtsi */ 1224 LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16); 1225 tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16); 1226 break; 1227 1228 case 0xb: /* l.sfgesi */ 1229 LOG_DIS("l.sfgesi r%d, %d\n", ra, I16); 1230 tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16); 1231 break; 1232 1233 case 0xc: /* l.sfltsi */ 1234 LOG_DIS("l.sfltsi r%d, %d\n", ra, I16); 1235 tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16); 1236 break; 1237 1238 case 0xd: /* l.sflesi */ 1239 LOG_DIS("l.sflesi r%d, %d\n", ra, I16); 1240 tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16); 1241 break; 1242 1243 default: 1244 gen_illegal_exception(dc); 1245 break; 1246 } 1247 } 1248 1249 static void dec_sys(DisasContext *dc, uint32_t insn) 1250 { 1251 uint32_t op0; 1252 uint32_t K16; 1253 1254 op0 = extract32(insn, 16, 10); 1255 K16 = extract32(insn, 0, 16); 1256 1257 switch (op0) { 1258 case 0x000: /* l.sys */ 1259 LOG_DIS("l.sys %d\n", K16); 1260 tcg_gen_movi_tl(cpu_pc, dc->pc); 1261 gen_exception(dc, EXCP_SYSCALL); 1262 dc->is_jmp = DISAS_UPDATE; 1263 break; 1264 1265 case 0x100: /* l.trap */ 1266 LOG_DIS("l.trap %d\n", K16); 1267 tcg_gen_movi_tl(cpu_pc, dc->pc); 1268 gen_exception(dc, EXCP_TRAP); 1269 break; 1270 1271 case 0x300: /* l.csync */ 1272 LOG_DIS("l.csync\n"); 1273 break; 1274 1275 case 0x200: /* l.msync */ 1276 LOG_DIS("l.msync\n"); 1277 tcg_gen_mb(TCG_MO_ALL); 1278 break; 1279 1280 case 0x270: /* l.psync */ 1281 LOG_DIS("l.psync\n"); 1282 break; 1283 1284 default: 1285 gen_illegal_exception(dc); 1286 break; 1287 } 1288 } 1289 1290 static void dec_float(DisasContext *dc, uint32_t insn) 1291 { 1292 uint32_t op0; 1293 uint32_t ra, rb, rd; 1294 op0 = extract32(insn, 0, 8); 1295 ra = extract32(insn, 16, 5); 1296 rb = extract32(insn, 11, 5); 1297 rd = extract32(insn, 21, 5); 1298 1299 switch (op0) { 1300 case 0x00: /* lf.add.s */ 1301 LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb); 1302 check_r0_write(rd); 1303 gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1304 break; 1305 1306 case 0x01: /* lf.sub.s */ 1307 LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb); 1308 check_r0_write(rd); 1309 gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1310 break; 1311 1312 case 0x02: /* lf.mul.s */ 1313 LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb); 1314 check_r0_write(rd); 1315 gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1316 break; 1317 1318 case 0x03: /* lf.div.s */ 1319 LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb); 1320 check_r0_write(rd); 1321 gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1322 break; 1323 1324 case 0x04: /* lf.itof.s */ 1325 LOG_DIS("lf.itof r%d, r%d\n", rd, ra); 1326 check_r0_write(rd); 1327 gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]); 1328 break; 1329 1330 case 0x05: /* lf.ftoi.s */ 1331 LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra); 1332 check_r0_write(rd); 1333 gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]); 1334 break; 1335 1336 case 0x06: /* lf.rem.s */ 1337 LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb); 1338 check_r0_write(rd); 1339 gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1340 break; 1341 1342 case 0x07: /* lf.madd.s */ 1343 LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb); 1344 check_r0_write(rd); 1345 gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd], 1346 cpu_R[ra], cpu_R[rb]); 1347 break; 1348 1349 case 0x08: /* lf.sfeq.s */ 1350 LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb); 1351 gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1352 break; 1353 1354 case 0x09: /* lf.sfne.s */ 1355 LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb); 1356 gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1357 break; 1358 1359 case 0x0a: /* lf.sfgt.s */ 1360 LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb); 1361 gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1362 break; 1363 1364 case 0x0b: /* lf.sfge.s */ 1365 LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb); 1366 gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1367 break; 1368 1369 case 0x0c: /* lf.sflt.s */ 1370 LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb); 1371 gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1372 break; 1373 1374 case 0x0d: /* lf.sfle.s */ 1375 LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb); 1376 gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1377 break; 1378 1379 /* not used yet, open it when we need or64. */ 1380 /*#ifdef TARGET_OPENRISC64 1381 case 0x10: lf.add.d 1382 LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb); 1383 check_of64s(dc); 1384 check_r0_write(rd); 1385 gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1386 break; 1387 1388 case 0x11: lf.sub.d 1389 LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb); 1390 check_of64s(dc); 1391 check_r0_write(rd); 1392 gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1393 break; 1394 1395 case 0x12: lf.mul.d 1396 LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb); 1397 check_of64s(dc); 1398 check_r0_write(rd); 1399 gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1400 break; 1401 1402 case 0x13: lf.div.d 1403 LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb); 1404 check_of64s(dc); 1405 check_r0_write(rd); 1406 gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1407 break; 1408 1409 case 0x14: lf.itof.d 1410 LOG_DIS("lf.itof r%d, r%d\n", rd, ra); 1411 check_of64s(dc); 1412 check_r0_write(rd); 1413 gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]); 1414 break; 1415 1416 case 0x15: lf.ftoi.d 1417 LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra); 1418 check_of64s(dc); 1419 check_r0_write(rd); 1420 gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]); 1421 break; 1422 1423 case 0x16: lf.rem.d 1424 LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb); 1425 check_of64s(dc); 1426 check_r0_write(rd); 1427 gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); 1428 break; 1429 1430 case 0x17: lf.madd.d 1431 LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb); 1432 check_of64s(dc); 1433 check_r0_write(rd); 1434 gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd], 1435 cpu_R[ra], cpu_R[rb]); 1436 break; 1437 1438 case 0x18: lf.sfeq.d 1439 LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb); 1440 check_of64s(dc); 1441 gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1442 break; 1443 1444 case 0x1a: lf.sfgt.d 1445 LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb); 1446 check_of64s(dc); 1447 gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1448 break; 1449 1450 case 0x1b: lf.sfge.d 1451 LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb); 1452 check_of64s(dc); 1453 gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1454 break; 1455 1456 case 0x19: lf.sfne.d 1457 LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb); 1458 check_of64s(dc); 1459 gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1460 break; 1461 1462 case 0x1c: lf.sflt.d 1463 LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb); 1464 check_of64s(dc); 1465 gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1466 break; 1467 1468 case 0x1d: lf.sfle.d 1469 LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb); 1470 check_of64s(dc); 1471 gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); 1472 break; 1473 #endif*/ 1474 1475 default: 1476 gen_illegal_exception(dc); 1477 break; 1478 } 1479 } 1480 1481 static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu) 1482 { 1483 uint32_t op0; 1484 uint32_t insn; 1485 insn = cpu_ldl_code(&cpu->env, dc->pc); 1486 op0 = extract32(insn, 26, 6); 1487 1488 switch (op0) { 1489 case 0x06: 1490 dec_M(dc, insn); 1491 break; 1492 1493 case 0x08: 1494 dec_sys(dc, insn); 1495 break; 1496 1497 case 0x2e: 1498 dec_logic(dc, insn); 1499 break; 1500 1501 case 0x2f: 1502 dec_compi(dc, insn); 1503 break; 1504 1505 case 0x31: 1506 dec_mac(dc, insn); 1507 break; 1508 1509 case 0x32: 1510 dec_float(dc, insn); 1511 break; 1512 1513 case 0x38: 1514 dec_calc(dc, insn); 1515 break; 1516 1517 case 0x39: 1518 dec_comp(dc, insn); 1519 break; 1520 1521 default: 1522 dec_misc(dc, insn); 1523 break; 1524 } 1525 } 1526 1527 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb) 1528 { 1529 CPUOpenRISCState *env = cs->env_ptr; 1530 OpenRISCCPU *cpu = openrisc_env_get_cpu(env); 1531 struct DisasContext ctx, *dc = &ctx; 1532 uint32_t pc_start; 1533 uint32_t next_page_start; 1534 int num_insns; 1535 int max_insns; 1536 1537 pc_start = tb->pc; 1538 dc->tb = tb; 1539 1540 dc->is_jmp = DISAS_NEXT; 1541 dc->pc = pc_start; 1542 dc->mem_idx = cpu_mmu_index(&cpu->env, false); 1543 dc->tb_flags = tb->flags; 1544 dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0; 1545 dc->singlestep_enabled = cs->singlestep_enabled; 1546 1547 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; 1548 num_insns = 0; 1549 max_insns = tb_cflags(tb) & CF_COUNT_MASK; 1550 1551 if (max_insns == 0) { 1552 max_insns = CF_COUNT_MASK; 1553 } 1554 if (max_insns > TCG_MAX_INSNS) { 1555 max_insns = TCG_MAX_INSNS; 1556 } 1557 1558 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) 1559 && qemu_log_in_addr_range(pc_start)) { 1560 qemu_log_lock(); 1561 qemu_log("----------------\n"); 1562 qemu_log("IN: %s\n", lookup_symbol(pc_start)); 1563 } 1564 1565 gen_tb_start(tb); 1566 1567 /* Allow the TCG optimizer to see that R0 == 0, 1568 when it's true, which is the common case. */ 1569 if (dc->tb_flags & TB_FLAGS_R0_0) { 1570 cpu_R[0] = tcg_const_tl(0); 1571 } else { 1572 cpu_R[0] = cpu_R0; 1573 } 1574 1575 do { 1576 tcg_gen_insn_start(dc->pc, (dc->delayed_branch ? 1 : 0) 1577 | (num_insns ? 2 : 0)); 1578 num_insns++; 1579 1580 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { 1581 tcg_gen_movi_tl(cpu_pc, dc->pc); 1582 gen_exception(dc, EXCP_DEBUG); 1583 dc->is_jmp = DISAS_UPDATE; 1584 /* The address covered by the breakpoint must be included in 1585 [tb->pc, tb->pc + tb->size) in order to for it to be 1586 properly cleared -- thus we increment the PC here so that 1587 the logic setting tb->size below does the right thing. */ 1588 dc->pc += 4; 1589 break; 1590 } 1591 1592 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) { 1593 gen_io_start(); 1594 } 1595 disas_openrisc_insn(dc, cpu); 1596 dc->pc = dc->pc + 4; 1597 1598 /* delay slot */ 1599 if (dc->delayed_branch) { 1600 dc->delayed_branch--; 1601 if (!dc->delayed_branch) { 1602 tcg_gen_mov_tl(cpu_pc, jmp_pc); 1603 tcg_gen_discard_tl(jmp_pc); 1604 dc->is_jmp = DISAS_UPDATE; 1605 break; 1606 } 1607 } 1608 } while (!dc->is_jmp 1609 && !tcg_op_buf_full() 1610 && !cs->singlestep_enabled 1611 && !singlestep 1612 && (dc->pc < next_page_start) 1613 && num_insns < max_insns); 1614 1615 if (tb_cflags(tb) & CF_LAST_IO) { 1616 gen_io_end(); 1617 } 1618 1619 if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) { 1620 tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0); 1621 } 1622 1623 tcg_gen_movi_tl(cpu_ppc, dc->pc - 4); 1624 if (dc->is_jmp == DISAS_NEXT) { 1625 dc->is_jmp = DISAS_UPDATE; 1626 tcg_gen_movi_tl(cpu_pc, dc->pc); 1627 } 1628 if (unlikely(cs->singlestep_enabled)) { 1629 gen_exception(dc, EXCP_DEBUG); 1630 } else { 1631 switch (dc->is_jmp) { 1632 case DISAS_NEXT: 1633 gen_goto_tb(dc, 0, dc->pc); 1634 break; 1635 default: 1636 case DISAS_JUMP: 1637 break; 1638 case DISAS_UPDATE: 1639 /* indicate that the hash table must be used 1640 to find the next TB */ 1641 tcg_gen_exit_tb(0); 1642 break; 1643 case DISAS_TB_JUMP: 1644 /* nothing more to generate */ 1645 break; 1646 } 1647 } 1648 1649 gen_tb_end(tb, num_insns); 1650 1651 tb->size = dc->pc - pc_start; 1652 tb->icount = num_insns; 1653 1654 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) 1655 && qemu_log_in_addr_range(pc_start)) { 1656 log_target_disas(cs, pc_start, tb->size, 0); 1657 qemu_log("\n"); 1658 qemu_log_unlock(); 1659 } 1660 } 1661 1662 void openrisc_cpu_dump_state(CPUState *cs, FILE *f, 1663 fprintf_function cpu_fprintf, 1664 int flags) 1665 { 1666 OpenRISCCPU *cpu = OPENRISC_CPU(cs); 1667 CPUOpenRISCState *env = &cpu->env; 1668 int i; 1669 1670 cpu_fprintf(f, "PC=%08x\n", env->pc); 1671 for (i = 0; i < 32; ++i) { 1672 cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i), 1673 (i % 4) == 3 ? '\n' : ' '); 1674 } 1675 } 1676 1677 void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb, 1678 target_ulong *data) 1679 { 1680 env->pc = data[0]; 1681 env->dflag = data[1] & 1; 1682 if (data[1] & 2) { 1683 env->ppc = env->pc - 4; 1684 } 1685 } 1686