/*
 * QEMU AVR CPU
 *
 * Copyright (c) 2019-2020 Michael Rolnik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "tcg/tcg.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "exec/translator.h"
#include "exec/gen-icount.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H


/*
 * Define if you want a BREAK instruction translated to a breakpoint
 * Active debugging connection is assumed
 * This is for
 * https://github.com/seharris/qemu-avr-tests/tree/master/instruction-tests
 * tests
 */
#undef BREAKPOINT_ON_BREAK

/* Program counter, kept as a word (16-bit opcode) address. */
static TCGv cpu_pc;

/* The eight SREG flags, each held in its own 32-bit global (value 0 or 1). */
static TCGv cpu_Cf;
static TCGv cpu_Zf;
static TCGv cpu_Nf;
static TCGv cpu_Vf;
static TCGv cpu_Sf;
static TCGv cpu_Hf;
static TCGv cpu_Tf;
static TCGv cpu_If;

/* RAMP extension registers used by parts with more than 64KB of data space. */
static TCGv cpu_rampD;
static TCGv cpu_rampX;
static TCGv cpu_rampY;
static TCGv cpu_rampZ;

/* The 32 general purpose registers r0..r31. */
static TCGv cpu_r[NUMBER_OF_CPU_REGISTERS];
static TCGv cpu_eind;
static TCGv cpu_sp;

/* Non-zero when the next instruction must be skipped (CPSE/SBRC/SBRS/...). */
static TCGv cpu_skip;

static const char reg_names[NUMBER_OF_CPU_REGISTERS][8] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
#define REG(x) (cpu_r[x])

#define DISAS_EXIT   DISAS_TARGET_0  /* We want return to the cpu main loop. */
#define DISAS_LOOKUP DISAS_TARGET_1  /* We have a variable condition exit. */
#define DISAS_CHAIN  DISAS_TARGET_2  /* We have a single condition exit. */

typedef struct DisasContext DisasContext;

/* This is the state at translation time. */
struct DisasContext {
    DisasContextBase base;

    CPUAVRState *env;
    CPUState *cs;

    /* Next program counter (word address) after the current instruction. */
    target_long npc;
    uint32_t opcode;

    /* Routine used to access memory */
    int memidx;

    /*
     * some AVR instructions can make the following instruction to be skipped
     * Let's name those instructions
     *     A - instruction that can skip the next one
     *     B - instruction that can be skipped. this depends on execution of A
     * there are two scenarios
     * 1. A and B belong to the same translation block
     * 2. A is the last instruction in the translation block and B is the
     *    first instruction of the next one
     *
     * following variables are used to simplify the skipping logic, they are
     * used in the following manner (sketch)
     *
     * TCGLabel *skip_label = NULL;
     * if (ctx->skip_cond != TCG_COND_NEVER) {
     *     skip_label = gen_new_label();
     *     tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label);
     * }
     *
     * translate(ctx);
     *
     * if (skip_label) {
     *     gen_set_label(skip_label);
     * }
     */
    TCGv skip_var0;
    TCGv skip_var1;
    TCGCond skip_cond;
};

/* Allocate the TCG globals backing the CPUAVRState fields declared above. */
void avr_cpu_tcg_init(void)
{
    int i;

#define AVR_REG_OFFS(x) offsetof(CPUAVRState, x)
    cpu_pc = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(pc_w), "pc");
    cpu_Cf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregC), "Cf");
    cpu_Zf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregZ), "Zf");
    cpu_Nf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregN), "Nf");
    cpu_Vf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregV), "Vf");
    cpu_Sf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregS), "Sf");
    cpu_Hf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregH), "Hf");
    cpu_Tf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregT), "Tf");
    cpu_If = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregI), "If");
    cpu_rampD = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampD), "rampD");
    cpu_rampX = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampX), "rampX");
    cpu_rampY = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampY), "rampY");
    cpu_rampZ = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampZ), "rampZ");
    cpu_eind = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(eind), "eind");
    cpu_sp = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sp), "sp");
    cpu_skip = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(skip), "skip");

    for (i = 0; i < NUMBER_OF_CPU_REGISTERS; i++) {
        cpu_r[i] = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(r[i]),
                                          reg_names[i]);
    }
#undef AVR_REG_OFFS
}

/* decodetree field translators: map encoded register fields to r16..r31. */
static int to_regs_16_31_by_one(DisasContext *ctx, int indx)
{
    return 16 + (indx % 16);
}

/* Map an encoded 3-bit field to r16..r23. */
static int to_regs_16_23_by_one(DisasContext *ctx, int indx)
{
    return 16 + (indx % 8);
}

/* Map an encoded 2-bit field to the register pairs r24, r26, r28, r30. */
static int to_regs_24_30_by_two(DisasContext *ctx, int indx)
{
    return 24 + (indx % 4) * 2;
}

/* Map an encoded 4-bit field to the even registers r0, r2, ..., r30. */
static int to_regs_00_30_by_two(DisasContext *ctx, int indx)
{
    return (indx % 16) * 2;
}

/* Fetch the next 16-bit opcode word; npc is a word address, hence the * 2. */
static uint16_t next_word(DisasContext *ctx)
{
    return cpu_lduw_code(ctx->env, ctx->npc++ * 2);
}

/* Combine a high halfword with the next code word (used by JMP/CALL). */
static int append_16(DisasContext *ctx, int x)
{
    return x << 16 | next_word(ctx);
}

/*
 * Check that the CPU implements @feature; if not, emit the "unsupported"
 * helper and end the translation block.
 */
static bool avr_have_feature(DisasContext *ctx, int feature)
{
    if (!avr_feature(ctx->env, feature)) {
        gen_helper_unsupported(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
        return false;
    }
    return true;
}

static bool decode_insn(DisasContext *ctx, uint16_t insn);
#include "decode-insn.c.inc"

/*
 * Arithmetic Instructions
 */

/*
 * Utility functions for updating status registers:
 *
 * - gen_add_CHf()
 * - gen_add_Vf()
 * - gen_sub_CHf()
 * - gen_sub_Vf()
 * - gen_NSf()
 * - gen_ZNSf()
 *
 */

/* Compute C and H for an addition R = Rd + Rr (carry out of bits 7 and 3). */
static void gen_add_CHf(TCGv R, TCGv Rd, TCGv Rr)
{
    TCGv t1 = tcg_temp_new_i32();
    TCGv t2 = tcg_temp_new_i32();
    TCGv t3 = tcg_temp_new_i32();

    tcg_gen_and_tl(t1, Rd, Rr); /* t1 = Rd & Rr */
    tcg_gen_andc_tl(t2, Rd, R); /* t2 = Rd & ~R */
    tcg_gen_andc_tl(t3, Rr, R); /* t3 = Rr & ~R */
    tcg_gen_or_tl(t1, t1, t2); /* t1 = t1 | t2 | t3 */
    tcg_gen_or_tl(t1, t1, t3);

    tcg_gen_shri_tl(cpu_Cf, t1, 7); /* Cf = t1(7) */
    tcg_gen_shri_tl(cpu_Hf, t1, 3); /* Hf = t1(3) */
    tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
}

/* Compute V (two's complement overflow) for an addition R = Rd + Rr. */
static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr)
{
    TCGv t1 = tcg_temp_new_i32();
    TCGv t2 = tcg_temp_new_i32();

    /* t1 = Rd & Rr & ~R | ~Rd & ~Rr & R */
    /*    = (Rd ^ R) & ~(Rd ^ Rr) */
    tcg_gen_xor_tl(t1, Rd, R);
    tcg_gen_xor_tl(t2, Rd, Rr);
    tcg_gen_andc_tl(t1, t1, t2);

    tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
}

/* Compute C and H for a subtraction R = Rd - Rr (borrow from bits 7 and 3). */
static void gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr)
{
    TCGv t1 = tcg_temp_new_i32();
    TCGv t2 = tcg_temp_new_i32();
    TCGv t3 = tcg_temp_new_i32();

    tcg_gen_not_tl(t1, Rd); /* t1 = ~Rd */
    tcg_gen_and_tl(t2, t1, Rr); /* t2 = ~Rd & Rr */
    tcg_gen_or_tl(t3, t1, Rr); /* t3 = (~Rd | Rr) & R */
    tcg_gen_and_tl(t3, t3, R);
    tcg_gen_or_tl(t2, t2, t3); /* t2 = ~Rd & Rr | ~Rd & R | R & Rr */

    tcg_gen_shri_tl(cpu_Cf, t2, 7); /* Cf = t2(7) */
    tcg_gen_shri_tl(cpu_Hf, t2, 3); /* Hf = t2(3) */
    tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
}

/* Compute V (two's complement overflow) for a subtraction R = Rd - Rr. */
static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr)
{
    TCGv t1 = tcg_temp_new_i32();
    TCGv t2 = tcg_temp_new_i32();

    /* t1 = Rd & ~Rr & ~R | ~Rd & Rr & R */
    /*    = (Rd ^ R) & (Rd ^ Rr) */
    tcg_gen_xor_tl(t1, Rd, R);
    tcg_gen_xor_tl(t2, Rd, Rr);
    tcg_gen_and_tl(t1, t1, t2);

    tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
}

/* Update N and S from an 8-bit result R (V must already be up to date). */
static void gen_NSf(TCGv R)
{
    tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
    tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
}

/* Update Z, N and S from an 8-bit result R (V must already be up to date). */
static void gen_ZNSf(TCGv R)
{
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */

    /* update status register */
    tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
    tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
}

/*
 * Adds two registers without the C Flag and places the result in the
 * destination register Rd.
294 */ 295 static bool trans_ADD(DisasContext *ctx, arg_ADD *a) 296 { 297 TCGv Rd = cpu_r[a->rd]; 298 TCGv Rr = cpu_r[a->rr]; 299 TCGv R = tcg_temp_new_i32(); 300 301 tcg_gen_add_tl(R, Rd, Rr); /* Rd = Rd + Rr */ 302 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */ 303 304 /* update status register */ 305 gen_add_CHf(R, Rd, Rr); 306 gen_add_Vf(R, Rd, Rr); 307 gen_ZNSf(R); 308 309 /* update output registers */ 310 tcg_gen_mov_tl(Rd, R); 311 return true; 312 } 313 314 /* 315 * Adds two registers and the contents of the C Flag and places the result in 316 * the destination register Rd. 317 */ 318 static bool trans_ADC(DisasContext *ctx, arg_ADC *a) 319 { 320 TCGv Rd = cpu_r[a->rd]; 321 TCGv Rr = cpu_r[a->rr]; 322 TCGv R = tcg_temp_new_i32(); 323 324 tcg_gen_add_tl(R, Rd, Rr); /* R = Rd + Rr + Cf */ 325 tcg_gen_add_tl(R, R, cpu_Cf); 326 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */ 327 328 /* update status register */ 329 gen_add_CHf(R, Rd, Rr); 330 gen_add_Vf(R, Rd, Rr); 331 gen_ZNSf(R); 332 333 /* update output registers */ 334 tcg_gen_mov_tl(Rd, R); 335 return true; 336 } 337 338 /* 339 * Adds an immediate value (0 - 63) to a register pair and places the result 340 * in the register pair. This instruction operates on the upper four register 341 * pairs, and is well suited for operations on the pointer registers. This 342 * instruction is not available in all devices. Refer to the device specific 343 * instruction set summary. 
344 */ 345 static bool trans_ADIW(DisasContext *ctx, arg_ADIW *a) 346 { 347 if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) { 348 return true; 349 } 350 351 TCGv RdL = cpu_r[a->rd]; 352 TCGv RdH = cpu_r[a->rd + 1]; 353 int Imm = (a->imm); 354 TCGv R = tcg_temp_new_i32(); 355 TCGv Rd = tcg_temp_new_i32(); 356 357 tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */ 358 tcg_gen_addi_tl(R, Rd, Imm); /* R = Rd + Imm */ 359 tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */ 360 361 /* update status register */ 362 tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */ 363 tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15); 364 tcg_gen_andc_tl(cpu_Vf, R, Rd); /* Vf = R & ~Rd */ 365 tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15); 366 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ 367 tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */ 368 tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf);/* Sf = Nf ^ Vf */ 369 370 /* update output registers */ 371 tcg_gen_andi_tl(RdL, R, 0xff); 372 tcg_gen_shri_tl(RdH, R, 8); 373 return true; 374 } 375 376 /* 377 * Subtracts two registers and places the result in the destination 378 * register Rd. 379 */ 380 static bool trans_SUB(DisasContext *ctx, arg_SUB *a) 381 { 382 TCGv Rd = cpu_r[a->rd]; 383 TCGv Rr = cpu_r[a->rr]; 384 TCGv R = tcg_temp_new_i32(); 385 386 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */ 387 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */ 388 389 /* update status register */ 390 tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */ 391 gen_sub_CHf(R, Rd, Rr); 392 gen_sub_Vf(R, Rd, Rr); 393 gen_ZNSf(R); 394 395 /* update output registers */ 396 tcg_gen_mov_tl(Rd, R); 397 return true; 398 } 399 400 /* 401 * Subtracts a register and a constant and places the result in the 402 * destination register Rd. This instruction is working on Register R16 to R31 403 * and is very well suited for operations on the X, Y, and Z-pointers. 
 */
static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = tcg_constant_i32(a->imm);
    TCGv R = tcg_temp_new_i32();

    tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */
    tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */

    /* update status register */
    gen_sub_CHf(R, Rd, Rr);
    gen_sub_Vf(R, Rd, Rr);
    gen_ZNSf(R);

    /* update output registers */
    tcg_gen_mov_tl(Rd, R);
    return true;
}

/*
 * Subtracts two registers and subtracts with the C Flag and places the
 * result in the destination register Rd.
 */
static bool trans_SBC(DisasContext *ctx, arg_SBC *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];
    TCGv R = tcg_temp_new_i32();
    TCGv zero = tcg_constant_i32(0);

    tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
    tcg_gen_sub_tl(R, R, cpu_Cf);
    tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */

    /* update status register; SBC only clears Z, it never sets it */
    gen_sub_CHf(R, Rd, Rr);
    gen_sub_Vf(R, Rd, Rr);
    gen_NSf(R);

    /*
     * Previous value remains unchanged when the result is zero;
     * cleared otherwise.
     */
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);

    /* update output registers */
    tcg_gen_mov_tl(Rd, R);
    return true;
}

/*
 * SBCI -- Subtract Immediate with Carry
 */
static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = tcg_constant_i32(a->imm);
    TCGv R = tcg_temp_new_i32();
    TCGv zero = tcg_constant_i32(0);

    tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
    tcg_gen_sub_tl(R, R, cpu_Cf);
    tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */

    /* update status register; like SBC, Z is only ever cleared here */
    gen_sub_CHf(R, Rd, Rr);
    gen_sub_Vf(R, Rd, Rr);
    gen_NSf(R);

    /*
     * Previous value remains unchanged when the result is zero;
     * cleared otherwise.
     */
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);

    /* update output registers */
    tcg_gen_mov_tl(Rd, R);
    return true;
}

/*
 * Subtracts an immediate value (0-63) from a register pair and places the
 * result in the register pair. This instruction operates on the upper four
 * register pairs, and is well suited for operations on the Pointer Registers.
 * This instruction is not available in all devices. Refer to the device
 * specific instruction set summary.
 */
static bool trans_SBIW(DisasContext *ctx, arg_SBIW *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) {
        return true;
    }

    TCGv RdL = cpu_r[a->rd];
    TCGv RdH = cpu_r[a->rd + 1];
    int Imm = (a->imm);
    TCGv R = tcg_temp_new_i32();
    TCGv Rd = tcg_temp_new_i32();

    tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */
    tcg_gen_subi_tl(R, Rd, Imm); /* R = Rd - Imm */
    tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */

    /* update status register */
    tcg_gen_andc_tl(cpu_Cf, R, Rd);
    tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15); /* Cf = R & ~Rd */
    tcg_gen_andc_tl(cpu_Vf, Rd, R);
    tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15); /* Vf = Rd & ~R */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
    tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */
    tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */

    /* update output registers */
    tcg_gen_andi_tl(RdL, R, 0xff);
    tcg_gen_shri_tl(RdH, R, 8);
    return true;
}

/*
 * Performs the logical AND between the contents of register Rd and register
 * Rr and places the result in the destination register Rd.
526 */ 527 static bool trans_AND(DisasContext *ctx, arg_AND *a) 528 { 529 TCGv Rd = cpu_r[a->rd]; 530 TCGv Rr = cpu_r[a->rr]; 531 TCGv R = tcg_temp_new_i32(); 532 533 tcg_gen_and_tl(R, Rd, Rr); /* Rd = Rd and Rr */ 534 535 /* update status register */ 536 tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */ 537 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ 538 gen_ZNSf(R); 539 540 /* update output registers */ 541 tcg_gen_mov_tl(Rd, R); 542 return true; 543 } 544 545 /* 546 * Performs the logical AND between the contents of register Rd and a constant 547 * and places the result in the destination register Rd. 548 */ 549 static bool trans_ANDI(DisasContext *ctx, arg_ANDI *a) 550 { 551 TCGv Rd = cpu_r[a->rd]; 552 int Imm = (a->imm); 553 554 tcg_gen_andi_tl(Rd, Rd, Imm); /* Rd = Rd & Imm */ 555 556 /* update status register */ 557 tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */ 558 gen_ZNSf(Rd); 559 560 return true; 561 } 562 563 /* 564 * Performs the logical OR between the contents of register Rd and register 565 * Rr and places the result in the destination register Rd. 566 */ 567 static bool trans_OR(DisasContext *ctx, arg_OR *a) 568 { 569 TCGv Rd = cpu_r[a->rd]; 570 TCGv Rr = cpu_r[a->rr]; 571 TCGv R = tcg_temp_new_i32(); 572 573 tcg_gen_or_tl(R, Rd, Rr); 574 575 /* update status register */ 576 tcg_gen_movi_tl(cpu_Vf, 0); 577 gen_ZNSf(R); 578 579 /* update output registers */ 580 tcg_gen_mov_tl(Rd, R); 581 return true; 582 } 583 584 /* 585 * Performs the logical OR between the contents of register Rd and a 586 * constant and places the result in the destination register Rd. 
 */
static bool trans_ORI(DisasContext *ctx, arg_ORI *a)
{
    TCGv Rd = cpu_r[a->rd];
    int Imm = (a->imm);

    tcg_gen_ori_tl(Rd, Rd, Imm); /* Rd = Rd | Imm */

    /* update status register */
    tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */
    gen_ZNSf(Rd);

    return true;
}

/*
 * Performs the logical EOR between the contents of register Rd and
 * register Rr and places the result in the destination register Rd.
 */
static bool trans_EOR(DisasContext *ctx, arg_EOR *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];

    tcg_gen_xor_tl(Rd, Rd, Rr);

    /* update status register */
    tcg_gen_movi_tl(cpu_Vf, 0);
    gen_ZNSf(Rd);

    return true;
}

/*
 * This instruction performs a One's Complement of register Rd
 * (Rd = $FF - Rd). The result is placed back in register Rd.
 */
static bool trans_COM(DisasContext *ctx, arg_COM *a)
{
    TCGv Rd = cpu_r[a->rd];

    /* One's complement of a byte == XOR with 0xff. */
    tcg_gen_xori_tl(Rd, Rd, 0xff);

    /* update status register */
    tcg_gen_movi_tl(cpu_Cf, 1); /* Cf = 1 */
    tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */
    gen_ZNSf(Rd);
    return true;
}

/*
 * Replaces the contents of register Rd with its two's complement; the
 * value $80 is left unchanged.
 */
static bool trans_NEG(DisasContext *ctx, arg_NEG *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv t0 = tcg_constant_i32(0);
    TCGv R = tcg_temp_new_i32();

    /* $80 maps to itself because (0 - 0x80) & 0xff == 0x80. */
    tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */
    tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */

    /* update status register; NEG is flagged exactly like 0 - Rd */
    gen_sub_CHf(R, t0, Rd);
    gen_sub_Vf(R, t0, Rd);
    gen_ZNSf(R);

    /* update output registers */
    tcg_gen_mov_tl(Rd, R);
    return true;
}

/*
 * Adds one -1- to the contents of register Rd and places the result in the
 * destination register Rd.
 The C Flag in SREG is not affected by the
 * operation, thus allowing the INC instruction to be used on a loop counter in
 * multiple-precision computations. When operating on unsigned numbers, only
 * BREQ and BRNE branches can be expected to perform consistently. When
 * operating on two's complement values, all signed branches are available.
 */
static bool trans_INC(DisasContext *ctx, arg_INC *a)
{
    TCGv Rd = cpu_r[a->rd];

    tcg_gen_addi_tl(Rd, Rd, 1);
    tcg_gen_andi_tl(Rd, Rd, 0xff);

    /* update status register; V is set only on the $7f -> $80 transition */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x80); /* Vf = Rd == 0x80 */
    gen_ZNSf(Rd);

    return true;
}

/*
 * Subtracts one -1- from the contents of register Rd and places the result
 * in the destination register Rd. The C Flag in SREG is not affected by the
 * operation, thus allowing the DEC instruction to be used on a loop counter in
 * multiple-precision computations. When operating on unsigned values, only
 * BREQ and BRNE branches can be expected to perform consistently. When
 * operating on two's complement values, all signed branches are available.
 */
static bool trans_DEC(DisasContext *ctx, arg_DEC *a)
{
    TCGv Rd = cpu_r[a->rd];

    tcg_gen_subi_tl(Rd, Rd, 1); /* Rd = Rd - 1 */
    tcg_gen_andi_tl(Rd, Rd, 0xff); /* make it 8 bits */

    /* update status register; V is set only on the $80 -> $7f transition */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x7f); /* Vf = Rd == 0x7f */
    gen_ZNSf(Rd);

    return true;
}

/*
 * This instruction performs 8-bit x 8-bit -> 16-bit unsigned multiplication.
 */
static bool trans_MUL(DisasContext *ctx, arg_MUL *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
        return true;
    }

    /* The 16-bit product is always placed in R1:R0. */
    TCGv R0 = cpu_r[0];
    TCGv R1 = cpu_r[1];
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];
    TCGv R = tcg_temp_new_i32();

    tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */
    tcg_gen_andi_tl(R0, R, 0xff);
    tcg_gen_shri_tl(R1, R, 8);

    /* update status register; C is the MSB of the 16-bit product */
    tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
    return true;
}

/*
 * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication.
 */
static bool trans_MULS(DisasContext *ctx, arg_MULS *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
        return true;
    }

    /* The 16-bit product is always placed in R1:R0. */
    TCGv R0 = cpu_r[0];
    TCGv R1 = cpu_r[1];
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];
    TCGv R = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv t1 = tcg_temp_new_i32();

    tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
    tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
    tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
    tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
    tcg_gen_andi_tl(R0, R, 0xff);
    tcg_gen_shri_tl(R1, R, 8);

    /* update status register; C is the MSB of the 16-bit product */
    tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
    return true;
}

/*
 * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
 * signed and an unsigned number.
763 */ 764 static bool trans_MULSU(DisasContext *ctx, arg_MULSU *a) 765 { 766 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) { 767 return true; 768 } 769 770 TCGv R0 = cpu_r[0]; 771 TCGv R1 = cpu_r[1]; 772 TCGv Rd = cpu_r[a->rd]; 773 TCGv Rr = cpu_r[a->rr]; 774 TCGv R = tcg_temp_new_i32(); 775 TCGv t0 = tcg_temp_new_i32(); 776 777 tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */ 778 tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */ 779 tcg_gen_andi_tl(R, R, 0xffff); /* make R 16 bits */ 780 tcg_gen_andi_tl(R0, R, 0xff); 781 tcg_gen_shri_tl(R1, R, 8); 782 783 /* update status register */ 784 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ 785 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ 786 return true; 787 } 788 789 /* 790 * This instruction performs 8-bit x 8-bit -> 16-bit unsigned 791 * multiplication and shifts the result one bit left. 792 */ 793 static bool trans_FMUL(DisasContext *ctx, arg_FMUL *a) 794 { 795 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) { 796 return true; 797 } 798 799 TCGv R0 = cpu_r[0]; 800 TCGv R1 = cpu_r[1]; 801 TCGv Rd = cpu_r[a->rd]; 802 TCGv Rr = cpu_r[a->rr]; 803 TCGv R = tcg_temp_new_i32(); 804 805 tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */ 806 807 /* update status register */ 808 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ 809 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ 810 811 /* update output registers */ 812 tcg_gen_shli_tl(R, R, 1); 813 tcg_gen_andi_tl(R0, R, 0xff); 814 tcg_gen_shri_tl(R1, R, 8); 815 tcg_gen_andi_tl(R1, R1, 0xff); 816 return true; 817 } 818 819 /* 820 * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication 821 * and shifts the result one bit left. 
 */
static bool trans_FMULS(DisasContext *ctx, arg_FMULS *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
        return true;
    }

    /* The shifted 16-bit product is always placed in R1:R0. */
    TCGv R0 = cpu_r[0];
    TCGv R1 = cpu_r[1];
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];
    TCGv R = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv t1 = tcg_temp_new_i32();

    tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
    tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
    tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
    tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */

    /* update status register; C and Z come from the unshifted product */
    tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */

    /* update output registers; shift left for the fractional format */
    tcg_gen_shli_tl(R, R, 1);
    tcg_gen_andi_tl(R0, R, 0xff);
    tcg_gen_shri_tl(R1, R, 8);
    tcg_gen_andi_tl(R1, R1, 0xff);
    return true;
}

/*
 * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
 * signed and an unsigned number and shifts the result one bit left.
 */
static bool trans_FMULSU(DisasContext *ctx, arg_FMULSU *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
        return true;
    }

    /* The shifted 16-bit product is always placed in R1:R0. */
    TCGv R0 = cpu_r[0];
    TCGv R1 = cpu_r[1];
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];
    TCGv R = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();

    tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed; Rr is unsigned */
    tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */
    tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */

    /* update status register; C and Z come from the unshifted product */
    tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */

    /* update output registers; shift left for the fractional format */
    tcg_gen_shli_tl(R, R, 1);
    tcg_gen_andi_tl(R0, R, 0xff);
    tcg_gen_shri_tl(R1, R, 8);
    tcg_gen_andi_tl(R1, R1, 0xff);
    return true;
}

/*
 * The module is an instruction set extension to the AVR CPU, performing
 * DES iterations. The 64-bit data block (plaintext or ciphertext) is placed in
 * the CPU register file, registers R0-R7, where LSB of data is placed in LSB
 * of R0 and MSB of data is placed in MSB of R7. The full 64-bit key (including
 * parity bits) is placed in registers R8- R15, organized in the register file
 * with LSB of key in LSB of R8 and MSB of key in MSB of R15. Executing one DES
 * instruction performs one round in the DES algorithm. Sixteen rounds must be
 * executed in increasing order to form the correct DES ciphertext or
 * plaintext. Intermediate results are stored in the register file (R0-R15)
 * after each DES instruction. The instruction's operand (K) determines which
 * round is executed, and the half carry flag (H) determines whether encryption
 * or decryption is performed. The DES algorithm is described in
 * "Specifications for the Data Encryption Standard" (Federal Information
 * Processing Standards Publication 46).
 Intermediate results in this
 * implementation differ from the standard because the initial permutation and
 * the inverse initial permutation are performed each iteration. This does not
 * affect the result in the final ciphertext or plaintext, but reduces
 * execution time.
 */
static bool trans_DES(DisasContext *ctx, arg_DES *a)
{
    /* TODO */
    if (!avr_have_feature(ctx, AVR_FEATURE_DES)) {
        return true;
    }

    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);

    return true;
}

/*
 * Branch Instructions
 */

/*
 * Set PC = EIND:ZH:ZL and exit via the lookup path.
 * NOTE(review): cpu_eind is OR'ed in directly, so it appears to hold EIND
 * already shifted into the high PC bits — confirm against cpu.h.
 */
static void gen_jmp_ez(DisasContext *ctx)
{
    tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
    tcg_gen_or_tl(cpu_pc, cpu_pc, cpu_eind);
    ctx->base.is_jmp = DISAS_LOOKUP;
}

/* Set PC = ZH:ZL (r31:r30) and exit via the lookup path. */
static void gen_jmp_z(DisasContext *ctx)
{
    tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
    ctx->base.is_jmp = DISAS_LOOKUP;
}

/*
 * Push the return address @ret onto the data stack, using as many bytes as
 * the device's PC width requires; SP is post-decremented, matching the
 * hardware's push scheme.
 */
static void gen_push_ret(DisasContext *ctx, int ret)
{
    if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
        TCGv t0 = tcg_constant_i32(ret & 0x0000ff);

        tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB);
        tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
    } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
        TCGv t0 = tcg_constant_i32(ret & 0x00ffff);

        tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
        tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW);
        tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
    } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
        /* Low byte at the highest address, high halfword below it. */
        TCGv lo = tcg_constant_i32(ret & 0x0000ff);
        TCGv hi = tcg_constant_i32((ret & 0xffff00) >> 8);

        tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
        tcg_gen_subi_tl(cpu_sp, cpu_sp, 2);
        tcg_gen_qemu_st_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);
        tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
    }
}

/*
 * Pop a return address into @ret, mirroring gen_push_ret(); SP is
 * pre-incremented, matching the hardware's pop scheme.
 */
static void gen_pop_ret(DisasContext *ctx, TCGv ret)
{
    if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
        tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
        tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_UB);
    } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
        tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
        tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_BEUW);
        tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
    } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
        TCGv lo = tcg_temp_new_i32();
        TCGv hi = tcg_temp_new_i32();

        tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
        tcg_gen_qemu_ld_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);

        tcg_gen_addi_tl(cpu_sp, cpu_sp, 2);
        tcg_gen_qemu_ld_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);

        tcg_gen_deposit_tl(ret, lo, hi, 8, 16);
    }
}

/*
 * Emit a jump to @dest, chaining translation blocks directly when
 * translator_use_goto_tb() allows it, otherwise going through the
 * lookup-and-goto-ptr slow path.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    const TranslationBlock *tb = ctx->base.tb;

    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

/*
 * Relative jump to an address within PC - 2K +1 and PC + 2K (words). For
 * AVR microcontrollers with Program memory not exceeding 4K words (8KB) this
 * instruction can address the entire memory from every address location. See
 * also JMP.
 */
static bool trans_RJMP(DisasContext *ctx, arg_RJMP *a)
{
    int dst = ctx->npc + a->imm;

    gen_goto_tb(ctx, 0, dst);

    return true;
}

/*
 * Indirect jump to the address pointed to by the Z (16 bits) Pointer
 * Register in the Register File. The Z-pointer Register is 16 bits wide and
 * allows jump within the lowest 64K words (128KB) section of Program memory.
 * This instruction is not available in all devices. Refer to the device
 * specific instruction set summary.
 */
static bool trans_IJMP(DisasContext *ctx, arg_IJMP *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
        return true;
    }

    gen_jmp_z(ctx);

    return true;
}

/*
 * Indirect jump to the address pointed to by the Z (16 bits) Pointer
 * Register in the Register File and the EIND Register in the I/O space. This
 * instruction allows for indirect jumps to the entire 4M (words) Program
 * memory space. See also IJMP. This instruction is not available in all
 * devices. Refer to the device specific instruction set summary.
 */
static bool trans_EIJMP(DisasContext *ctx, arg_EIJMP *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
        return true;
    }

    gen_jmp_ez(ctx);
    return true;
}

/*
 * Jump to an address within the entire 4M (words) Program memory. See also
 * RJMP. This instruction is not available in all devices. Refer to the device
 * specific instruction set summary.
 */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
        return true;
    }

    gen_goto_tb(ctx, 0, a->imm);

    return true;
}

/*
 * Relative call to an address within PC - 2K + 1 and PC + 2K (words). The
 * return address (the instruction after the RCALL) is stored onto the Stack.
 * See also CALL. For AVR microcontrollers with Program memory not exceeding 4K
 * words (8KB) this instruction can address the entire memory from every
 * address location. The Stack Pointer uses a post-decrement scheme during
 * RCALL.
 */
static bool trans_RCALL(DisasContext *ctx, arg_RCALL *a)
{
    /* Return address is the next instruction; target is PC-relative. */
    int ret = ctx->npc;
    int dst = ctx->npc + a->imm;

    gen_push_ret(ctx, ret);
    gen_goto_tb(ctx, 0, dst);

    return true;
}

/*
 * Calls to a subroutine within the entire 4M (words) Program memory. The
 * return address (to the instruction after the CALL) will be stored onto the
 * Stack. See also RCALL. The Stack Pointer uses a post-decrement scheme during
 * CALL. This instruction is not available in all devices. Refer to the device
 * specific instruction set summary.
 */
static bool trans_ICALL(DisasContext *ctx, arg_ICALL *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
        return true;
    }

    int ret = ctx->npc;

    /* Push the return address, then jump via the Z pointer. */
    gen_push_ret(ctx, ret);
    gen_jmp_z(ctx);

    return true;
}

/*
 * Indirect call of a subroutine pointed to by the Z (16 bits) Pointer
 * Register in the Register File and the EIND Register in the I/O space. This
 * instruction allows for indirect calls to the entire 4M (words) Program
 * memory space. See also ICALL. The Stack Pointer uses a post-decrement scheme
 * during EICALL. This instruction is not available in all devices. Refer to
 * the device specific instruction set summary.
 */
static bool trans_EICALL(DisasContext *ctx, arg_EICALL *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
        return true;
    }

    int ret = ctx->npc;

    /* Push the return address, then jump via EIND:Z. */
    gen_push_ret(ctx, ret);
    gen_jmp_ez(ctx);
    return true;
}

/*
 * Calls to a subroutine within the entire Program memory. The return
 * address (to the instruction after the CALL) will be stored onto the Stack.
 * (See also RCALL). The Stack Pointer uses a post-decrement scheme during
 * CALL. This instruction is not available in all devices. 
Refer to the device
 * specific instruction set summary.
 */
static bool trans_CALL(DisasContext *ctx, arg_CALL *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
        return true;
    }

    /* Absolute target word address and return address (next insn). */
    int Imm = a->imm;
    int ret = ctx->npc;

    gen_push_ret(ctx, ret);
    gen_goto_tb(ctx, 0, Imm);

    return true;
}

/*
 * Returns from subroutine. The return address is loaded from the STACK.
 * The Stack Pointer uses a preincrement scheme during RET.
 */
static bool trans_RET(DisasContext *ctx, arg_RET *a)
{
    gen_pop_ret(ctx, cpu_pc);

    /* Target PC is only known at run time, so go through a TB lookup. */
    ctx->base.is_jmp = DISAS_LOOKUP;
    return true;
}

/*
 * Returns from interrupt. The return address is loaded from the STACK and
 * the Global Interrupt Flag is set. Note that the Status Register is not
 * automatically stored when entering an interrupt routine, and it is not
 * restored when returning from an interrupt routine. This must be handled by
 * the application program. The Stack Pointer uses a pre-increment scheme
 * during RETI.
 */
static bool trans_RETI(DisasContext *ctx, arg_RETI *a)
{
    gen_pop_ret(ctx, cpu_pc);
    tcg_gen_movi_tl(cpu_If, 1);

    /* Need to return to main loop to re-evaluate interrupts. */
    ctx->base.is_jmp = DISAS_EXIT;
    return true;
}

/*
 * This instruction performs a compare between two registers Rd and Rr, and
 * skips the next instruction if Rd = Rr.
 */
static bool trans_CPSE(DisasContext *ctx, arg_CPSE *a)
{
    /*
     * NOTE(review): no code is emitted here; skip_cond/skip_var0/skip_var1
     * are presumably consumed by the translator loop, which wraps the next
     * instruction in the conditional skip — confirm in the tb_ops code.
     */
    ctx->skip_cond = TCG_COND_EQ;
    ctx->skip_var0 = cpu_r[a->rd];
    ctx->skip_var1 = cpu_r[a->rr];
    return true;
}

/*
 * This instruction performs a compare between two registers Rd and Rr.
 * None of the registers are changed. All conditional branches can be used
 * after this instruction.
 */
static bool trans_CP(DisasContext *ctx, arg_CP *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];
    TCGv R = tcg_temp_new_i32();

    tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
    tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */

    /* update status register */
    gen_sub_CHf(R, Rd, Rr);
    gen_sub_Vf(R, Rd, Rr);
    gen_ZNSf(R);
    return true;
}

/*
 * This instruction performs a compare between two registers Rd and Rr and
 * also takes into account the previous carry. None of the registers are
 * changed. All conditional branches can be used after this instruction.
 */
static bool trans_CPC(DisasContext *ctx, arg_CPC *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];
    TCGv R = tcg_temp_new_i32();
    TCGv zero = tcg_constant_i32(0);

    tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
    tcg_gen_sub_tl(R, R, cpu_Cf);
    tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
    /* update status register; Zf handled separately below */
    gen_sub_CHf(R, Rd, Rr);
    gen_sub_Vf(R, Rd, Rr);
    gen_NSf(R);

    /*
     * Previous value remains unchanged when the result is zero;
     * cleared otherwise.
     */
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
    return true;
}

/*
 * This instruction performs a compare between register Rd and a constant.
 * The register is not changed. All conditional branches can be used after this
 * instruction.
 */
static bool trans_CPI(DisasContext *ctx, arg_CPI *a)
{
    TCGv Rd = cpu_r[a->rd];
    int Imm = a->imm;
    TCGv Rr = tcg_constant_i32(Imm);
    TCGv R = tcg_temp_new_i32();

    tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
    tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */

    /* update status register */
    gen_sub_CHf(R, Rd, Rr);
    gen_sub_Vf(R, Rd, Rr);
    gen_ZNSf(R);
    return true;
}

/*
 * This instruction tests a single bit in a register and skips the next
 * instruction if the bit is cleared.
 */
static bool trans_SBRC(DisasContext *ctx, arg_SBRC *a)
{
    TCGv Rr = cpu_r[a->rr];

    /* Skip when (Rr & (1 << bit)) == 0; skip_var1 defaults elsewhere. */
    ctx->skip_cond = TCG_COND_EQ;
    ctx->skip_var0 = tcg_temp_new();

    tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
    return true;
}

/*
 * This instruction tests a single bit in a register and skips the next
 * instruction if the bit is set.
 */
static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a)
{
    TCGv Rr = cpu_r[a->rr];

    /* Skip when (Rr & (1 << bit)) != 0 */
    ctx->skip_cond = TCG_COND_NE;
    ctx->skip_var0 = tcg_temp_new();

    tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
    return true;
}

/*
 * This instruction tests a single bit in an I/O Register and skips the
 * next instruction if the bit is cleared. This instruction operates on the
 * lower 32 I/O Registers -- addresses 0-31.
 */
static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
{
    TCGv data = tcg_temp_new_i32();
    TCGv port = tcg_constant_i32(a->reg);

    /* Read the I/O register via helper, then test the selected bit. */
    gen_helper_inb(data, cpu_env, port);
    tcg_gen_andi_tl(data, data, 1 << a->bit);
    ctx->skip_cond = TCG_COND_EQ;
    ctx->skip_var0 = data;

    return true;
}

/*
 * This instruction tests a single bit in an I/O Register and skips the
 * next instruction if the bit is set. 
This instruction operates on the lower 1310 * 32 I/O Registers -- addresses 0-31. 1311 */ 1312 static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a) 1313 { 1314 TCGv data = tcg_temp_new_i32(); 1315 TCGv port = tcg_constant_i32(a->reg); 1316 1317 gen_helper_inb(data, cpu_env, port); 1318 tcg_gen_andi_tl(data, data, 1 << a->bit); 1319 ctx->skip_cond = TCG_COND_NE; 1320 ctx->skip_var0 = data; 1321 1322 return true; 1323 } 1324 1325 /* 1326 * Conditional relative branch. Tests a single bit in SREG and branches 1327 * relatively to PC if the bit is cleared. This instruction branches relatively 1328 * to PC in either direction (PC - 63 < = destination <= PC + 64). The 1329 * parameter k is the offset from PC and is represented in two's complement 1330 * form. 1331 */ 1332 static bool trans_BRBC(DisasContext *ctx, arg_BRBC *a) 1333 { 1334 TCGLabel *not_taken = gen_new_label(); 1335 1336 TCGv var; 1337 1338 switch (a->bit) { 1339 case 0x00: 1340 var = cpu_Cf; 1341 break; 1342 case 0x01: 1343 var = cpu_Zf; 1344 break; 1345 case 0x02: 1346 var = cpu_Nf; 1347 break; 1348 case 0x03: 1349 var = cpu_Vf; 1350 break; 1351 case 0x04: 1352 var = cpu_Sf; 1353 break; 1354 case 0x05: 1355 var = cpu_Hf; 1356 break; 1357 case 0x06: 1358 var = cpu_Tf; 1359 break; 1360 case 0x07: 1361 var = cpu_If; 1362 break; 1363 default: 1364 g_assert_not_reached(); 1365 } 1366 1367 tcg_gen_brcondi_i32(TCG_COND_NE, var, 0, not_taken); 1368 gen_goto_tb(ctx, 0, ctx->npc + a->imm); 1369 gen_set_label(not_taken); 1370 1371 ctx->base.is_jmp = DISAS_CHAIN; 1372 return true; 1373 } 1374 1375 /* 1376 * Conditional relative branch. Tests a single bit in SREG and branches 1377 * relatively to PC if the bit is set. This instruction branches relatively to 1378 * PC in either direction (PC - 63 < = destination <= PC + 64). The parameter k 1379 * is the offset from PC and is represented in two's complement form. 
 */
static bool trans_BRBS(DisasContext *ctx, arg_BRBS *a)
{
    TCGLabel *not_taken = gen_new_label();

    /* Select the SREG flag variable named by the 3-bit field. */
    TCGv var;

    switch (a->bit) {
    case 0x00:
        var = cpu_Cf;
        break;
    case 0x01:
        var = cpu_Zf;
        break;
    case 0x02:
        var = cpu_Nf;
        break;
    case 0x03:
        var = cpu_Vf;
        break;
    case 0x04:
        var = cpu_Sf;
        break;
    case 0x05:
        var = cpu_Hf;
        break;
    case 0x06:
        var = cpu_Tf;
        break;
    case 0x07:
        var = cpu_If;
        break;
    default:
        g_assert_not_reached();
    }

    /* Fall through to not_taken when the flag is clear. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, var, 0, not_taken);
    gen_goto_tb(ctx, 0, ctx->npc + a->imm);
    gen_set_label(not_taken);

    ctx->base.is_jmp = DISAS_CHAIN;
    return true;
}

/*
 * Data Transfer Instructions
 */

/*
 * in the gen_set_addr & gen_get_addr functions
 * H assumed to be in 0x00ff0000 format
 * M assumed to be in 0x000000ff format
 * L assumed to be in 0x000000ff format
 */
static void gen_set_addr(TCGv addr, TCGv H, TCGv M, TCGv L)
{

    tcg_gen_andi_tl(L, addr, 0x000000ff);

    tcg_gen_andi_tl(M, addr, 0x0000ff00);
    tcg_gen_shri_tl(M, M, 8);

    tcg_gen_andi_tl(H, addr, 0x00ff0000);
}

static void gen_set_xaddr(TCGv addr)
{
    gen_set_addr(addr, cpu_rampX, cpu_r[27], cpu_r[26]);
}

static void gen_set_yaddr(TCGv addr)
{
    gen_set_addr(addr, cpu_rampY, cpu_r[29], cpu_r[28]);
}

static void gen_set_zaddr(TCGv addr)
{
    gen_set_addr(addr, cpu_rampZ, cpu_r[31], cpu_r[30]);
}

static TCGv gen_get_addr(TCGv H, TCGv M, TCGv L)
{
    TCGv addr = tcg_temp_new_i32();

    /* addr = H[7:0]:M, then addr = addr:L, i.e. H in bits 16..23 */
    tcg_gen_deposit_tl(addr, M, H, 8, 8);
    tcg_gen_deposit_tl(addr, L, addr, 8, 16);

    return addr;
}

static TCGv gen_get_xaddr(void)
{
    return gen_get_addr(cpu_rampX, cpu_r[27], 
cpu_r[26]);
}

static TCGv gen_get_yaddr(void)
{
    return gen_get_addr(cpu_rampY, cpu_r[29], cpu_r[28]);
}

static TCGv gen_get_zaddr(void)
{
    return gen_get_addr(cpu_rampZ, cpu_r[31], cpu_r[30]);
}

/*
 * NOTE(review): the comment that used to sit here described the LAC
 * instruction and did not match these helpers.
 *
 * Store/load one byte to/from data space.  When the TB was translated with
 * TB_FLAGS_FULL_ACCESS set, the access is routed through the fullwr/fullrd
 * helpers; otherwise a plain one-byte memory access is emitted.
 */
static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
{
    if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
        gen_helper_fullwr(cpu_env, data, addr);
    } else {
        tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB);
    }
}

static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
{
    if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
        gen_helper_fullrd(data, cpu_env, addr);
    } else {
        tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
    }
}

/*
 * This instruction makes a copy of one register into another. The source
 * register Rr is left unchanged, while the destination register Rd is loaded
 * with a copy of Rr.
 */
static bool trans_MOV(DisasContext *ctx, arg_MOV *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv Rr = cpu_r[a->rr];

    tcg_gen_mov_tl(Rd, Rr);

    return true;
}

/*
 * This instruction makes a copy of one register pair into another register
 * pair. 
The source register pair Rr+1:Rr is left unchanged, while the
 * destination register pair Rd+1:Rd is loaded with a copy of Rr + 1:Rr. This
 * instruction is not available in all devices. Refer to the device specific
 * instruction set summary.
 */
static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_MOVW)) {
        return true;
    }

    TCGv RdL = cpu_r[a->rd];
    TCGv RdH = cpu_r[a->rd + 1];
    TCGv RrL = cpu_r[a->rr];
    TCGv RrH = cpu_r[a->rr + 1];

    /* Copy high byte first, then low byte of the pair. */
    tcg_gen_mov_tl(RdH, RrH);
    tcg_gen_mov_tl(RdL, RrL);

    return true;
}

/*
 * Loads an 8 bit constant directly to register 16 to 31.
 */
static bool trans_LDI(DisasContext *ctx, arg_LDI *a)
{
    TCGv Rd = cpu_r[a->rd];
    int imm = a->imm;

    tcg_gen_movi_tl(Rd, imm);

    return true;
}

/*
 * Loads one byte from the data space to a register. For parts with SRAM,
 * the data space consists of the Register File, I/O memory and internal SRAM
 * (and external SRAM if applicable). For parts without SRAM, the data space
 * consists of the register file only. The EEPROM has a separate address space.
 * A 16-bit address must be supplied. Memory access is limited to the current
 * data segment of 64KB. The LDS instruction uses the RAMPD Register to access
 * memory above 64KB. To access another data segment in devices with more than
 * 64KB data space, the RAMPD in register in the I/O area has to be changed.
 * This instruction is not available in all devices. Refer to the device
 * specific instruction set summary.
 */
static bool trans_LDS(DisasContext *ctx, arg_LDS *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = tcg_temp_new_i32();
    TCGv H = cpu_rampD;
    /* The 16-bit data address is in the next program word. */
    a->imm = next_word(ctx);

    tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
    tcg_gen_shli_tl(addr, addr, 16);
    tcg_gen_ori_tl(addr, addr, a->imm);

    gen_data_load(ctx, Rd, addr);
    return true;
}

/*
 * Loads one byte indirect from the data space to a register. For parts
 * with SRAM, the data space consists of the Register File, I/O memory and
 * internal SRAM (and external SRAM if applicable). For parts without SRAM, the
 * data space consists of the Register File only. In some parts the Flash
 * Memory has been mapped to the data space and can be read using this command.
 * The EEPROM has a separate address space. The data location is pointed to by
 * the X (16 bits) Pointer Register in the Register File. Memory access is
 * limited to the current data segment of 64KB. To access another data segment
 * in devices with more than 64KB data space, the RAMPX in register in the I/O
 * area has to be changed. The X-pointer Register can either be left unchanged
 * by the operation, or it can be post-incremented or predecremented. These
 * features are especially suited for accessing arrays, tables, and Stack
 * Pointer usage of the X-pointer Register. Note that only the low byte of the
 * X-pointer is updated in devices with no more than 256 bytes data space. For
 * such devices, the high byte of the pointer is not used by this instruction
 * and can be used for other purposes. The RAMPX Register in the I/O area is
 * updated in parts with more than 64KB data space or more than 64KB Program
 * memory, and the increment/decrement is added to the entire 24-bit address on
 * such devices. Not all variants of this instruction is available in all
 * devices. 
Refer to the device specific instruction set summary. In the
 * Reduced Core tinyAVR the LD instruction can be used to achieve the same
 * operation as LPM since the program memory is mapped to the data memory
 * space.
 */
/* LD Rd, X */
static bool trans_LDX1(DisasContext *ctx, arg_LDX1 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_xaddr();

    gen_data_load(ctx, Rd, addr);
    return true;
}

/* LD Rd, X+ (post-increment) */
static bool trans_LDX2(DisasContext *ctx, arg_LDX2 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_xaddr();

    gen_data_load(ctx, Rd, addr);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */

    gen_set_xaddr(addr);
    return true;
}

/* LD Rd, -X (pre-decrement) */
static bool trans_LDX3(DisasContext *ctx, arg_LDX3 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_xaddr();

    tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
    gen_data_load(ctx, Rd, addr);
    gen_set_xaddr(addr);
    return true;
}

/*
 * Loads one byte indirect with or without displacement from the data space
 * to a register. For parts with SRAM, the data space consists of the Register
 * File, I/O memory and internal SRAM (and external SRAM if applicable). For
 * parts without SRAM, the data space consists of the Register File only. In
 * some parts the Flash Memory has been mapped to the data space and can be
 * read using this command. The EEPROM has a separate address space. The data
 * location is pointed to by the Y (16 bits) Pointer Register in the Register
 * File. Memory access is limited to the current data segment of 64KB. To
 * access another data segment in devices with more than 64KB data space, the
 * RAMPY in register in the I/O area has to be changed. The Y-pointer Register
 * can either be left unchanged by the operation, or it can be post-incremented
 * or predecremented. 
These features are especially suited for accessing
 * arrays, tables, and Stack Pointer usage of the Y-pointer Register. Note that
 * only the low byte of the Y-pointer is updated in devices with no more than
 * 256 bytes data space. For such devices, the high byte of the pointer is not
 * used by this instruction and can be used for other purposes. The RAMPY
 * Register in the I/O area is updated in parts with more than 64KB data space
 * or more than 64KB Program memory, and the increment/decrement/displacement
 * is added to the entire 24-bit address on such devices. Not all variants of
 * this instruction is available in all devices. Refer to the device specific
 * instruction set summary. In the Reduced Core tinyAVR the LD instruction can
 * be used to achieve the same operation as LPM since the program memory is
 * mapped to the data memory space.
 */
/* LD Rd, Y+ (post-increment) */
static bool trans_LDY2(DisasContext *ctx, arg_LDY2 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_yaddr();

    gen_data_load(ctx, Rd, addr);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */

    gen_set_yaddr(addr);
    return true;
}

/* LD Rd, -Y (pre-decrement) */
static bool trans_LDY3(DisasContext *ctx, arg_LDY3 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_yaddr();

    tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
    gen_data_load(ctx, Rd, addr);
    gen_set_yaddr(addr);
    return true;
}

/* LDD Rd, Y+q (displacement; Y itself is not written back) */
static bool trans_LDDY(DisasContext *ctx, arg_LDDY *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_yaddr();

    tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
    gen_data_load(ctx, Rd, addr);
    return true;
}

/*
 * Loads one byte indirect with or without displacement from the data space
 * to a register. 
For parts with SRAM, the data space consists of the Register
 * File, I/O memory and internal SRAM (and external SRAM if applicable). For
 * parts without SRAM, the data space consists of the Register File only. In
 * some parts the Flash Memory has been mapped to the data space and can be
 * read using this command. The EEPROM has a separate address space. The data
 * location is pointed to by the Z (16 bits) Pointer Register in the Register
 * File. Memory access is limited to the current data segment of 64KB. To
 * access another data segment in devices with more than 64KB data space, the
 * RAMPZ register in the I/O area has to be changed. The Z-pointer Register
 * can either be left unchanged by the operation, or it can be post-incremented
 * or predecremented. These features are especially suited for Stack Pointer
 * usage of the Z-pointer Register, however because the Z-pointer Register can
 * be used for indirect subroutine calls, indirect jumps and table lookup, it
 * is often more convenient to use the X or Y-pointer as a dedicated Stack
 * Pointer. Note that only the low byte of the Z-pointer is updated in devices
 * with no more than 256 bytes data space. For such devices, the high byte of
 * the pointer is not used by this instruction and can be used for other
 * purposes. The RAMPZ Register in the I/O area is updated in parts with more
 * than 64KB data space or more than 64KB Program memory, and the
 * increment/decrement/displacement is added to the entire 24-bit address on
 * such devices. Not all variants of this instruction are available in all
 * devices. Refer to the device specific instruction set summary. In the
 * Reduced Core tinyAVR the LD instruction can be used to achieve the same
 * operation as LPM since the program memory is mapped to the data memory
 * space. 
For using the Z-pointer for table lookup in Program memory see the
 * LPM and ELPM instructions.
 */
/* LD Rd, Z+ (post-increment) */
static bool trans_LDZ2(DisasContext *ctx, arg_LDZ2 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    gen_data_load(ctx, Rd, addr);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */

    gen_set_zaddr(addr);
    return true;
}

/* LD Rd, -Z (pre-decrement) */
static bool trans_LDZ3(DisasContext *ctx, arg_LDZ3 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
    gen_data_load(ctx, Rd, addr);

    gen_set_zaddr(addr);
    return true;
}

/* LDD Rd, Z+q (displacement; Z itself is not written back) */
static bool trans_LDDZ(DisasContext *ctx, arg_LDDZ *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
    gen_data_load(ctx, Rd, addr);
    return true;
}

/*
 * Stores one byte from a Register to the data space. For parts with SRAM,
 * the data space consists of the Register File, I/O memory and internal SRAM
 * (and external SRAM if applicable). For parts without SRAM, the data space
 * consists of the Register File only. The EEPROM has a separate address space.
 * A 16-bit address must be supplied. Memory access is limited to the current
 * data segment of 64KB. The STS instruction uses the RAMPD Register to access
 * memory above 64KB. To access another data segment in devices with more than
 * 64KB data space, the RAMPD in register in the I/O area has to be changed.
 * This instruction is not available in all devices. Refer to the device
 * specific instruction set summary.
 */
static bool trans_STS(DisasContext *ctx, arg_STS *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = tcg_temp_new_i32();
    TCGv H = cpu_rampD;
    /* The 16-bit data address is in the next program word. */
    a->imm = next_word(ctx);

    tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
    tcg_gen_shli_tl(addr, addr, 16);
    tcg_gen_ori_tl(addr, addr, a->imm);
    gen_data_store(ctx, Rd, addr);
    return true;
}

/*
 * Stores one byte indirect from a register to data space. For parts with SRAM,
 * the data space consists of the Register File, I/O memory, and internal SRAM
 * (and external SRAM if applicable). For parts without SRAM, the data space
 * consists of the Register File only. The EEPROM has a separate address space.
 *
 * The data location is pointed to by the X (16 bits) Pointer Register in the
 * Register File. Memory access is limited to the current data segment of 64KB.
 * To access another data segment in devices with more than 64KB data space, the
 * RAMPX in register in the I/O area has to be changed.
 *
 * The X-pointer Register can either be left unchanged by the operation, or it
 * can be post-incremented or pre-decremented. These features are especially
 * suited for accessing arrays, tables, and Stack Pointer usage of the
 * X-pointer Register. Note that only the low byte of the X-pointer is updated
 * in devices with no more than 256 bytes data space. For such devices, the high
 * byte of the pointer is not used by this instruction and can be used for other
 * purposes. The RAMPX Register in the I/O area is updated in parts with more
 * than 64KB data space or more than 64KB Program memory, and the increment /
 * decrement is added to the entire 24-bit address on such devices.
 */
/* ST X, Rr */
static bool trans_STX1(DisasContext *ctx, arg_STX1 *a)
{
    TCGv Rd = cpu_r[a->rr];
    TCGv addr = gen_get_xaddr();

    gen_data_store(ctx, Rd, addr);
    return true;
}

/* ST X+, Rr (post-increment) */
static bool trans_STX2(DisasContext *ctx, arg_STX2 *a)
{
    TCGv Rd = cpu_r[a->rr];
    TCGv addr = gen_get_xaddr();

    gen_data_store(ctx, Rd, addr);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
    gen_set_xaddr(addr);
    return true;
}

/* ST -X, Rr (pre-decrement) */
static bool trans_STX3(DisasContext *ctx, arg_STX3 *a)
{
    TCGv Rd = cpu_r[a->rr];
    TCGv addr = gen_get_xaddr();

    tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
    gen_data_store(ctx, Rd, addr);
    gen_set_xaddr(addr);
    return true;
}

/*
 * Stores one byte indirect with or without displacement from a register to data
 * space. For parts with SRAM, the data space consists of the Register File, I/O
 * memory, and internal SRAM (and external SRAM if applicable). For parts
 * without SRAM, the data space consists of the Register File only. The EEPROM
 * has a separate address space.
 *
 * The data location is pointed to by the Y (16 bits) Pointer Register in the
 * Register File. Memory access is limited to the current data segment of 64KB.
 * To access another data segment in devices with more than 64KB data space, the
 * RAMPY in register in the I/O area has to be changed.
 *
 * The Y-pointer Register can either be left unchanged by the operation, or it
 * can be post-incremented or pre-decremented. These features are especially
 * suited for accessing arrays, tables, and Stack Pointer usage of the Y-pointer
 * Register. Note that only the low byte of the Y-pointer is updated in devices
 * with no more than 256 bytes data space. For such devices, the high byte of
 * the pointer is not used by this instruction and can be used for other
 * purposes. 
The RAMPY Register in the I/O area is updated in parts with more
 * than 64KB data space or more than 64KB Program memory, and the increment /
 * decrement / displacement is added to the entire 24-bit address on such
 * devices.
 */
/* ST Y+, Rr (post-increment) */
static bool trans_STY2(DisasContext *ctx, arg_STY2 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_yaddr();

    gen_data_store(ctx, Rd, addr);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
    gen_set_yaddr(addr);
    return true;
}

/* ST -Y, Rr (pre-decrement) */
static bool trans_STY3(DisasContext *ctx, arg_STY3 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_yaddr();

    tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
    gen_data_store(ctx, Rd, addr);
    gen_set_yaddr(addr);
    return true;
}

/* STD Y+q, Rr (displacement; Y itself is not written back) */
static bool trans_STDY(DisasContext *ctx, arg_STDY *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_yaddr();

    tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
    gen_data_store(ctx, Rd, addr);
    return true;
}

/*
 * Stores one byte indirect with or without displacement from a register to data
 * space. For parts with SRAM, the data space consists of the Register File, I/O
 * memory, and internal SRAM (and external SRAM if applicable). For parts
 * without SRAM, the data space consists of the Register File only. The EEPROM
 * has a separate address space.
 *
 * The data location is pointed to by the Z (16 bits) Pointer Register in the
 * Register File. Memory access is limited to the current data segment of 64KB.
 * To access another data segment in devices with more than 64KB data space, the
 * RAMPZ in register in the I/O area has to be changed.
 *
 * The Z-pointer Register can either be left unchanged by the operation, or it
 * can be post-incremented or pre-decremented. 
These features are especially
 * suited for accessing arrays, tables, and Stack Pointer usage of the Z-pointer
 * Register. Note that only the low byte of the Z-pointer is updated in devices
 * with no more than 256 bytes data space. For such devices, the high byte of
 * the pointer is not used by this instruction and can be used for other
 * purposes. The RAMPZ Register in the I/O area is updated in parts with more
 * than 64KB data space or more than 64KB Program memory, and the increment /
 * decrement / displacement is added to the entire 24-bit address on such
 * devices.
 */
/* ST Z+, Rr (post-increment) */
static bool trans_STZ2(DisasContext *ctx, arg_STZ2 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    gen_data_store(ctx, Rd, addr);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */

    gen_set_zaddr(addr);
    return true;
}

/* ST -Z, Rr (pre-decrement) */
static bool trans_STZ3(DisasContext *ctx, arg_STZ3 *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
    gen_data_store(ctx, Rd, addr);

    gen_set_zaddr(addr);
    return true;
}

/* STD Z+q, Rr (displacement; Z itself is not written back) */
static bool trans_STDZ(DisasContext *ctx, arg_STDZ *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
    gen_data_store(ctx, Rd, addr);
    return true;
}

/*
 * Loads one byte pointed to by the Z-register into the destination
 * register Rd. This instruction features a 100% space effective constant
 * initialization or constant data fetch. The Program memory is organized in
 * 16-bit words while the Z-pointer is a byte address. Thus, the least
 * significant bit of the Z-pointer selects either low byte (ZLSB = 0) or high
 * byte (ZLSB = 1). This instruction can address the first 64KB (32K words) of
 * Program memory. 
The Z-pointer Register can either be left unchanged by the
 * operation, or it can be incremented. The incrementation does not apply to
 * the RAMPZ Register.
 *
 * Devices with Self-Programming capability can use the LPM instruction to read
 * the Fuse and Lock bit values.
 */
/* LPM (implicit destination R0) */
static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
        return true;
    }

    TCGv Rd = cpu_r[0];
    TCGv addr = tcg_temp_new_i32();
    TCGv H = cpu_r[31];
    TCGv L = cpu_r[30];

    tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
    tcg_gen_or_tl(addr, addr, L);
    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
    return true;
}

/* LPM Rd, Z */
static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
        return true;
    }

    TCGv Rd = cpu_r[a->rd];
    TCGv addr = tcg_temp_new_i32();
    TCGv H = cpu_r[31];
    TCGv L = cpu_r[30];

    tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
    tcg_gen_or_tl(addr, addr, L);
    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
    return true;
}

/* LPM Rd, Z+ — post-increments Z (R31:R30 only; RAMPZ is not touched) */
static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_LPMX)) {
        return true;
    }

    TCGv Rd = cpu_r[a->rd];
    TCGv addr = tcg_temp_new_i32();
    TCGv H = cpu_r[31];
    TCGv L = cpu_r[30];

    tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
    tcg_gen_or_tl(addr, addr, L);
    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
    /* Write the incremented address back into R31:R30. */
    tcg_gen_andi_tl(L, addr, 0xff);
    tcg_gen_shri_tl(addr, addr, 8);
    tcg_gen_andi_tl(H, addr, 0xff);
    return true;
}

/*
 * Loads one byte pointed to by the Z-register and the RAMPZ Register in
 * the I/O space, and places this byte in the destination register Rd. 
This
 * instruction features a 100% space effective constant initialization or
 * constant data fetch. The Program memory is organized in 16-bit words while
 * the Z-pointer is a byte address. Thus, the least significant bit of the
 * Z-pointer selects either low byte (ZLSB = 0) or high byte (ZLSB = 1). This
 * instruction can address the entire Program memory space. The Z-pointer
 * Register can either be left unchanged by the operation, or it can be
 * incremented. The incrementation applies to the entire 24-bit concatenation
 * of the RAMPZ and Z-pointer Registers.
 *
 * Devices with Self-Programming capability can use the ELPM instruction to
 * read the Fuse and Lock bit value.
 */

/*
 * ELPM (implied R0): R0 = program_memory[gen_get_zaddr()].  Unlike LPM,
 * the address comes from gen_get_zaddr() — presumably the full
 * RAMPZ:Z concatenation; confirm against gen_get_zaddr's definition.
 */
static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
        return true;
    }

    TCGv Rd = cpu_r[0];
    TCGv addr = gen_get_zaddr();

    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
    return true;
}

/* ELPM Rd, Z: Rd = program_memory[Z address]; pointer left unchanged. */
static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
        return true;
    }

    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
    return true;
}

/*
 * ELPM Rd, Z+: load, then post-increment the address and write it back
 * via gen_set_zaddr (the increment applies to the whole address, per the
 * block comment above).
 */
static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_ELPMX)) {
        return true;
    }

    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();

    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
    tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
    gen_set_zaddr(addr);
    return true;
}

/*
 * SPM can be used to erase a page in the Program memory, to write a page
 * in the Program memory (that is already erased), and to set Boot Loader Lock
 * bits.
In some devices, the Program memory can be written one word at a time,
 * in other devices an entire page can be programmed simultaneously after first
 * filling a temporary page buffer. In all cases, the Program memory must be
 * erased one page at a time. When erasing the Program memory, the RAMPZ and
 * Z-register are used as page address. When writing the Program memory, the
 * RAMPZ and Z-register are used as page or word address, and the R1:R0
 * register pair is used as data(1). When setting the Boot Loader Lock bits,
 * the R1:R0 register pair is used as data. Refer to the device documentation
 * for detailed description of SPM usage. This instruction can address the
 * entire Program memory.
 *
 * The SPM instruction is not available in all devices. Refer to the device
 * specific instruction set summary.
 *
 * Note: 1. R1 determines the instruction high byte, and R0 determines the
 * instruction low byte.
 */
static bool trans_SPM(DisasContext *ctx, arg_SPM *a)
{
    /* TODO: self-programming is not implemented; SPM is currently a no-op. */
    if (!avr_have_feature(ctx, AVR_FEATURE_SPM)) {
        return true;
    }

    return true;
}

static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a)
{
    /* TODO: self-programming is not implemented; SPM Z+ is currently a no-op. */
    if (!avr_have_feature(ctx, AVR_FEATURE_SPMX)) {
        return true;
    }

    return true;
}

/*
 * Loads data from the I/O Space (Ports, Timers, Configuration Registers,
 * etc.) into register Rd in the Register File.
 */
static bool trans_IN(DisasContext *ctx, arg_IN *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv port = tcg_constant_i32(a->imm);

    /* I/O reads may have side effects, so go through the helper. */
    gen_helper_inb(Rd, cpu_env, port);
    return true;
}

/*
 * Stores data from register Rr in the Register File to I/O Space (Ports,
 * Timers, Configuration Registers, etc.).
 */
static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv port = tcg_constant_i32(a->imm);

    /* I/O writes may have side effects, so go through the helper. */
    gen_helper_outb(cpu_env, port, Rd);
    return true;
}

/*
 * This instruction stores the contents of register Rr on the STACK. The
 * Stack Pointer is post-decremented by 1 after the PUSH. This instruction is
 * not available in all devices. Refer to the device specific instruction set
 * summary.
 */
static bool trans_PUSH(DisasContext *ctx, arg_PUSH *a)
{
    TCGv Rd = cpu_r[a->rd];

    /* Store first, then decrement SP (stack grows downwards). */
    gen_data_store(ctx, Rd, cpu_sp);
    tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);

    return true;
}

/*
 * This instruction loads register Rd with a byte from the STACK. The Stack
 * Pointer is pre-incremented by 1 before the POP. This instruction is not
 * available in all devices. Refer to the device specific instruction set
 * summary.
 */
static bool trans_POP(DisasContext *ctx, arg_POP *a)
{
    /*
     * Using a temp to work around some strange behaviour:
     * tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
     * gen_data_load(ctx, Rd, cpu_sp);
     * seems to cause the add to happen twice.
     * This doesn't happen if either the add or the load is removed.
     */
    TCGv t1 = tcg_temp_new_i32();
    TCGv Rd = cpu_r[a->rd];

    tcg_gen_addi_tl(t1, cpu_sp, 1); /* pre-increment into the temp */
    gen_data_load(ctx, Rd, t1);
    tcg_gen_mov_tl(cpu_sp, t1);     /* commit the new SP */

    return true;
}

/*
 * Exchanges one byte indirect between register and data space. The data
 * location is pointed to by the Z (16 bits) Pointer Register in the Register
 * File. Memory access is limited to the current data segment of 64KB. To
 * access another data segment in devices with more than 64KB data space, the
 * RAMPZ in register in the I/O area has to be changed.
 *
 * The Z-pointer Register is left unchanged by the operation.
This instruction
 * is especially suited for writing/reading status bits stored in SRAM.
 */
static bool trans_XCH(DisasContext *ctx, arg_XCH *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
        return true;
    }

    TCGv Rd = cpu_r[a->rd];
    TCGv t0 = tcg_temp_new_i32();
    TCGv addr = gen_get_zaddr();

    /* Swap Rd with mem[Z]: load old value, store Rd, move old value in. */
    gen_data_load(ctx, t0, addr);
    gen_data_store(ctx, Rd, addr);
    tcg_gen_mov_tl(Rd, t0);
    return true;
}

/*
 * Load one byte indirect from data space to register and set bits in data
 * space specified by the register. The instruction can only be used towards
 * internal SRAM. The data location is pointed to by the Z (16 bits) Pointer
 * Register in the Register File. Memory access is limited to the current data
 * segment of 64KB. To access another data segment in devices with more than
 * 64KB data space, the RAMPZ in register in the I/O area has to be changed.
 *
 * The Z-pointer Register is left unchanged by the operation. This instruction
 * is especially suited for setting status bits stored in SRAM.
 */
static bool trans_LAS(DisasContext *ctx, arg_LAS *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
        return true;
    }

    TCGv Rr = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();
    TCGv t0 = tcg_temp_new_i32();
    TCGv t1 = tcg_temp_new_i32();

    gen_data_load(ctx, t0, addr);  /* t0 = mem[addr] */
    tcg_gen_or_tl(t1, t0, Rr);     /* t1 = t0 | Rr */
    tcg_gen_mov_tl(Rr, t0);        /* Rr = t0 (old memory value) */
    gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
    return true;
}

/*
 * Load one byte indirect from data space to register and stores and clear
 * the bits in data space specified by the register. The instruction can
 * only be used towards internal SRAM. The data location is pointed to by
 * the Z (16 bits) Pointer Register in the Register File.
Memory access is
 * limited to the current data segment of 64KB. To access another data
 * segment in devices with more than 64KB data space, the RAMPZ in register
 * in the I/O area has to be changed.
 *
 * The Z-pointer Register is left unchanged by the operation. This instruction
 * is especially suited for clearing status bits stored in SRAM.
 */
static bool trans_LAC(DisasContext *ctx, arg_LAC *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
        return true;
    }

    TCGv Rr = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();
    TCGv t0 = tcg_temp_new_i32();
    TCGv t1 = tcg_temp_new_i32();

    gen_data_load(ctx, t0, addr);  /* t0 = mem[addr] */
    tcg_gen_andc_tl(t1, t0, Rr);   /* t1 = t0 & (0xff - Rr) = t0 & ~Rr */
    tcg_gen_mov_tl(Rr, t0);        /* Rr = t0 (old memory value) */
    gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
    return true;
}


/*
 * Load one byte indirect from data space to register and toggles bits in
 * the data space specified by the register. The instruction can only be used
 * towards SRAM. The data location is pointed to by the Z (16 bits) Pointer
 * Register in the Register File. Memory access is limited to the current data
 * segment of 64KB. To access another data segment in devices with more than
 * 64KB data space, the RAMPZ in register in the I/O area has to be changed.
 *
 * The Z-pointer Register is left unchanged by the operation. This instruction
 * is especially suited for changing status bits stored in SRAM.
 */
static bool trans_LAT(DisasContext *ctx, arg_LAT *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
        return true;
    }

    TCGv Rd = cpu_r[a->rd];
    TCGv addr = gen_get_zaddr();
    TCGv t0 = tcg_temp_new_i32();
    TCGv t1 = tcg_temp_new_i32();

    gen_data_load(ctx, t0, addr);  /* t0 = mem[addr] */
    tcg_gen_xor_tl(t1, t0, Rd);    /* t1 = t0 ^ Rd */
    tcg_gen_mov_tl(Rd, t0);        /* Rd = t0 (old memory value) */
    gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
    return true;
}

/*
 * Bit and Bit-test Instructions
 */

/*
 * Update Z, N, V and S after a one-bit right shift; caller must have
 * already placed the shifted-out bit in cpu_Cf.
 */
static void gen_rshift_ZNVSf(TCGv R)
{
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
    tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
    tcg_gen_xor_tl(cpu_Vf, cpu_Nf, cpu_Cf); /* Vf = Nf ^ Cf */
    tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
}

/*
 * Shifts all bits in Rd one place to the right. Bit 7 is cleared. Bit 0 is
 * loaded into the C Flag of the SREG. This operation effectively divides an
 * unsigned value by two. The C Flag can be used to round the result.
 */
static bool trans_LSR(DisasContext *ctx, arg_LSR *a)
{
    TCGv Rd = cpu_r[a->rd];

    tcg_gen_andi_tl(cpu_Cf, Rd, 1); /* Cf = Rd(0) */
    tcg_gen_shri_tl(Rd, Rd, 1);

    /* update status register */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, Rd, 0); /* Zf = Rd == 0 */
    tcg_gen_movi_tl(cpu_Nf, 0); /* bit 7 is always cleared by LSR */
    tcg_gen_mov_tl(cpu_Vf, cpu_Cf); /* Vf = Nf ^ Cf = Cf */
    tcg_gen_mov_tl(cpu_Sf, cpu_Vf); /* Sf = Nf ^ Vf = Vf */

    return true;
}

/*
 * Shifts all bits in Rd one place to the right. The C Flag is shifted into
 * bit 7 of Rd. Bit 0 is shifted into the C Flag. This operation, combined
 * with ASR, effectively divides multi-byte signed values by two. Combined with
 * LSR it effectively divides multi-byte unsigned values by two. The Carry Flag
 * can be used to round the result.
2345 */ 2346 static bool trans_ROR(DisasContext *ctx, arg_ROR *a) 2347 { 2348 TCGv Rd = cpu_r[a->rd]; 2349 TCGv t0 = tcg_temp_new_i32(); 2350 2351 tcg_gen_shli_tl(t0, cpu_Cf, 7); 2352 2353 /* update status register */ 2354 tcg_gen_andi_tl(cpu_Cf, Rd, 1); 2355 2356 /* update output register */ 2357 tcg_gen_shri_tl(Rd, Rd, 1); 2358 tcg_gen_or_tl(Rd, Rd, t0); 2359 2360 /* update status register */ 2361 gen_rshift_ZNVSf(Rd); 2362 return true; 2363 } 2364 2365 /* 2366 * Shifts all bits in Rd one place to the right. Bit 7 is held constant. Bit 0 2367 * is loaded into the C Flag of the SREG. This operation effectively divides a 2368 * signed value by two without changing its sign. The Carry Flag can be used to 2369 * round the result. 2370 */ 2371 static bool trans_ASR(DisasContext *ctx, arg_ASR *a) 2372 { 2373 TCGv Rd = cpu_r[a->rd]; 2374 TCGv t0 = tcg_temp_new_i32(); 2375 2376 /* update status register */ 2377 tcg_gen_andi_tl(cpu_Cf, Rd, 1); /* Cf = Rd(0) */ 2378 2379 /* update output register */ 2380 tcg_gen_andi_tl(t0, Rd, 0x80); /* Rd = (Rd & 0x80) | (Rd >> 1) */ 2381 tcg_gen_shri_tl(Rd, Rd, 1); 2382 tcg_gen_or_tl(Rd, Rd, t0); 2383 2384 /* update status register */ 2385 gen_rshift_ZNVSf(Rd); 2386 return true; 2387 } 2388 2389 /* 2390 * Swaps high and low nibbles in a register. 2391 */ 2392 static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a) 2393 { 2394 TCGv Rd = cpu_r[a->rd]; 2395 TCGv t0 = tcg_temp_new_i32(); 2396 TCGv t1 = tcg_temp_new_i32(); 2397 2398 tcg_gen_andi_tl(t0, Rd, 0x0f); 2399 tcg_gen_shli_tl(t0, t0, 4); 2400 tcg_gen_andi_tl(t1, Rd, 0xf0); 2401 tcg_gen_shri_tl(t1, t1, 4); 2402 tcg_gen_or_tl(Rd, t0, t1); 2403 return true; 2404 } 2405 2406 /* 2407 * Sets a specified bit in an I/O Register. This instruction operates on 2408 * the lower 32 I/O Registers -- addresses 0-31. 
 */
static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
{
    TCGv data = tcg_temp_new_i32();
    TCGv port = tcg_constant_i32(a->reg);

    /* Read-modify-write through the I/O helpers. */
    gen_helper_inb(data, cpu_env, port);
    tcg_gen_ori_tl(data, data, 1 << a->bit);
    gen_helper_outb(cpu_env, port, data);
    return true;
}

/*
 * Clears a specified bit in an I/O Register. This instruction operates on
 * the lower 32 I/O Registers -- addresses 0-31.
 */
static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
{
    TCGv data = tcg_temp_new_i32();
    TCGv port = tcg_constant_i32(a->reg);

    /* Read-modify-write through the I/O helpers. */
    gen_helper_inb(data, cpu_env, port);
    tcg_gen_andi_tl(data, data, ~(1 << a->bit));
    gen_helper_outb(cpu_env, port, data);
    return true;
}

/*
 * Stores bit b from Rd to the T Flag in SREG (Status Register).
 */
static bool trans_BST(DisasContext *ctx, arg_BST *a)
{
    TCGv Rd = cpu_r[a->rd];

    /* Tf = Rd(bit), normalized to 0/1. */
    tcg_gen_andi_tl(cpu_Tf, Rd, 1 << a->bit);
    tcg_gen_shri_tl(cpu_Tf, cpu_Tf, a->bit);

    return true;
}

/*
 * Copies the T Flag in the SREG (Status Register) to bit b in register Rd.
 */
static bool trans_BLD(DisasContext *ctx, arg_BLD *a)
{
    TCGv Rd = cpu_r[a->rd];
    TCGv t1 = tcg_temp_new_i32();

    tcg_gen_andi_tl(Rd, Rd, ~(1u << a->bit)); /* clear bit */
    tcg_gen_shli_tl(t1, cpu_Tf, a->bit); /* create mask */
    tcg_gen_or_tl(Rd, Rd, t1);
    return true;
}

/*
 * Sets a single Flag or bit in SREG.
2465 */ 2466 static bool trans_BSET(DisasContext *ctx, arg_BSET *a) 2467 { 2468 switch (a->bit) { 2469 case 0x00: 2470 tcg_gen_movi_tl(cpu_Cf, 0x01); 2471 break; 2472 case 0x01: 2473 tcg_gen_movi_tl(cpu_Zf, 0x01); 2474 break; 2475 case 0x02: 2476 tcg_gen_movi_tl(cpu_Nf, 0x01); 2477 break; 2478 case 0x03: 2479 tcg_gen_movi_tl(cpu_Vf, 0x01); 2480 break; 2481 case 0x04: 2482 tcg_gen_movi_tl(cpu_Sf, 0x01); 2483 break; 2484 case 0x05: 2485 tcg_gen_movi_tl(cpu_Hf, 0x01); 2486 break; 2487 case 0x06: 2488 tcg_gen_movi_tl(cpu_Tf, 0x01); 2489 break; 2490 case 0x07: 2491 tcg_gen_movi_tl(cpu_If, 0x01); 2492 break; 2493 } 2494 2495 return true; 2496 } 2497 2498 /* 2499 * Clears a single Flag in SREG. 2500 */ 2501 static bool trans_BCLR(DisasContext *ctx, arg_BCLR *a) 2502 { 2503 switch (a->bit) { 2504 case 0x00: 2505 tcg_gen_movi_tl(cpu_Cf, 0x00); 2506 break; 2507 case 0x01: 2508 tcg_gen_movi_tl(cpu_Zf, 0x00); 2509 break; 2510 case 0x02: 2511 tcg_gen_movi_tl(cpu_Nf, 0x00); 2512 break; 2513 case 0x03: 2514 tcg_gen_movi_tl(cpu_Vf, 0x00); 2515 break; 2516 case 0x04: 2517 tcg_gen_movi_tl(cpu_Sf, 0x00); 2518 break; 2519 case 0x05: 2520 tcg_gen_movi_tl(cpu_Hf, 0x00); 2521 break; 2522 case 0x06: 2523 tcg_gen_movi_tl(cpu_Tf, 0x00); 2524 break; 2525 case 0x07: 2526 tcg_gen_movi_tl(cpu_If, 0x00); 2527 break; 2528 } 2529 2530 return true; 2531 } 2532 2533 /* 2534 * MCU Control Instructions 2535 */ 2536 2537 /* 2538 * The BREAK instruction is used by the On-chip Debug system, and is 2539 * normally not used in the application software. When the BREAK instruction is 2540 * executed, the AVR CPU is set in the Stopped Mode. This gives the On-chip 2541 * Debugger access to internal resources. If any Lock bits are set, or either 2542 * the JTAGEN or OCDEN Fuses are unprogrammed, the CPU will treat the BREAK 2543 * instruction as a NOP and will not enter the Stopped mode. This instruction 2544 * is not available in all devices. Refer to the device specific instruction 2545 * set summary. 
 */
static bool trans_BREAK(DisasContext *ctx, arg_BREAK *a)
{
    if (!avr_have_feature(ctx, AVR_FEATURE_BREAK)) {
        return true;
    }

#ifdef BREAKPOINT_ON_BREAK
    /* Rewind PC to the BREAK insn itself before reporting the debug stop. */
    tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
    gen_helper_debug(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#else
    /* NOP */
#endif

    return true;
}

/*
 * This instruction performs a single cycle No Operation.
 */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{

    /* NOP */

    return true;
}

/*
 * This instruction sets the circuit in sleep mode defined by the MCU
 * Control Register.
 */
static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
{
    /* The sleep helper does not fall through to the next insn. */
    gen_helper_sleep(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/*
 * This instruction resets the Watchdog Timer. This instruction must be
 * executed within a limited time given by the WD prescaler. See the Watchdog
 * Timer hardware specification.
 */
static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
{
    gen_helper_wdr(cpu_env);

    return true;
}

/*
 * Core translation mechanism functions:
 *
 *    - translate()
 *    - canonicalize_skip()
 *    - gen_intermediate_code()
 *    - restore_state_to_opc()
 *
 */

/* Decode and translate one instruction; unknown opcodes raise via helper. */
static void translate(DisasContext *ctx)
{
    uint32_t opcode = next_word(ctx);

    if (!decode_insn(ctx, opcode)) {
        gen_helper_unsupported(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

/* Standardize the cpu_skip condition to NE. */
static bool canonicalize_skip(DisasContext *ctx)
{
    switch (ctx->skip_cond) {
    case TCG_COND_NEVER:
        /* Normal case: cpu_skip is known to be false. */
        return false;

    case TCG_COND_ALWAYS:
        /*
         * Breakpoint case: cpu_skip is known to be true, via TB_FLAGS_SKIP.
         * The breakpoint is on the instruction being skipped, at the start
         * of the TranslationBlock. No need to update.
         */
        return false;

    case TCG_COND_NE:
        if (ctx->skip_var1 == NULL) {
            tcg_gen_mov_tl(cpu_skip, ctx->skip_var0);
        } else {
            tcg_gen_xor_tl(cpu_skip, ctx->skip_var0, ctx->skip_var1);
            ctx->skip_var1 = NULL;
        }
        break;

    default:
        /* Convert to a NE condition vs 0. */
        if (ctx->skip_var1 == NULL) {
            tcg_gen_setcondi_tl(ctx->skip_cond, cpu_skip, ctx->skip_var0, 0);
        } else {
            tcg_gen_setcond_tl(ctx->skip_cond, cpu_skip,
                               ctx->skip_var0, ctx->skip_var1);
            ctx->skip_var1 = NULL;
        }
        ctx->skip_cond = TCG_COND_NE;
        break;
    }
    ctx->skip_var0 = cpu_skip;
    return true;
}

static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAVRState *env = cs->env_ptr;
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->cs = cs;
    ctx->env = env;
    /* npc is a word address; pc_first is a byte address. */
    ctx->npc = ctx->base.pc_first / 2;

    ctx->skip_cond = TCG_COND_NEVER;
    if (tb_flags & TB_FLAGS_SKIP) {
        ctx->skip_cond = TCG_COND_ALWAYS;
        ctx->skip_var0 = cpu_skip;
    }

    if (tb_flags & TB_FLAGS_FULL_ACCESS) {
        /*
         * This flag is set by ST/LD instruction we will regenerate it ONLY
         * with mem/cpu memory access instead of mem access
         */
        ctx->base.max_insns = 1;
    }
}

static void avr_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* Nothing to do at the start of a TB. */
}

static void avr_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->npc);
}

static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    TCGLabel *skip_label = NULL;

    /* Conditionally skip the next instruction, if indicated. */
    if (ctx->skip_cond != TCG_COND_NEVER) {
        skip_label = gen_new_label();
        if (ctx->skip_var0 == cpu_skip) {
            /*
             * Copy cpu_skip so that we may zero it before the branch.
             * This ensures that cpu_skip is non-zero after the label
             * if and only if the skipped insn itself sets a skip.
             */
            ctx->skip_var0 = tcg_temp_new();
            tcg_gen_mov_tl(ctx->skip_var0, cpu_skip);
            tcg_gen_movi_tl(cpu_skip, 0);
        }
        if (ctx->skip_var1 == NULL) {
            tcg_gen_brcondi_tl(ctx->skip_cond, ctx->skip_var0, 0, skip_label);
        } else {
            tcg_gen_brcond_tl(ctx->skip_cond, ctx->skip_var0,
                              ctx->skip_var1, skip_label);
            ctx->skip_var1 = NULL;
        }
        ctx->skip_cond = TCG_COND_NEVER;
        ctx->skip_var0 = NULL;
    }

    translate(ctx);

    /* translate() advanced npc (word address); convert back to bytes. */
    ctx->base.pc_next = ctx->npc * 2;

    if (skip_label) {
        /* Materialize any skip the translated insn itself requested. */
        canonicalize_skip(ctx);
        gen_set_label(skip_label);

        switch (ctx->base.is_jmp) {
        case DISAS_NORETURN:
            /* The insn ended the TB, but the skipped path falls through. */
            ctx->base.is_jmp = DISAS_CHAIN;
            break;
        case DISAS_NEXT:
            if (ctx->base.tb->flags & TB_FLAGS_SKIP) {
                ctx->base.is_jmp = DISAS_TOO_MANY;
            }
            break;
        default:
            break;
        }
    }

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_first = ctx->base.pc_first & TARGET_PAGE_MASK;

        /* Stop before the next insn could cross the page boundary. */
        if ((ctx->base.pc_next - page_first) >= TARGET_PAGE_SIZE - 4) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    bool nonconst_skip = canonicalize_skip(ctx);
    /*
     * Because we disable interrupts while env->skip is set,
     * we must return to the main loop to re-evaluate afterward.
     */
    bool force_exit = ctx->base.tb->flags & TB_FLAGS_SKIP;

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        /* The TB already ended in an exception; no skip may be pending. */
        assert(!nonconst_skip);
        break;
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
    case DISAS_CHAIN:
        if (!nonconst_skip && !force_exit) {
            /* Note gen_goto_tb checks singlestep. */
            gen_goto_tb(ctx, 1, ctx->npc);
            break;
        }
        tcg_gen_movi_tl(cpu_pc, ctx->npc);
        /* fall through */
    case DISAS_LOOKUP:
        if (!force_exit) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* fall through */
    case DISAS_EXIT:
        /* Return to the main loop so interrupts are re-evaluated. */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void avr_tr_disas_log(const DisasContextBase *dcbase,
                             CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}

/* Per-target hooks consumed by the generic translator_loop(). */
static const TranslatorOps avr_tr_ops = {
    .init_disas_context = avr_tr_init_disas_context,
    .tb_start = avr_tr_tb_start,
    .insn_start = avr_tr_insn_start,
    .translate_insn = avr_tr_translate_insn,
    .tb_stop = avr_tr_tb_stop,
    .disas_log = avr_tr_disas_log,
};

/* Entry point: translate one TB starting at pc into TCG ops. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = { };
    translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
}