/*
 * QEMU AVR CPU helpers
 *
 * Copyright (c) 2016-2020 Michael Rolnik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/address-spaces.h"
#include "exec/helper-proto.h"

bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AVRCPU *cpu = AVR_CPU(cs);
    CPUAVRState *env = &cpu->env;

    /*
     * We cannot separate a skip from the next instruction,
     * as the skip would not be preserved across the interrupt.
     * Separating the two insns normally only happens at page boundaries.
     */
    if (env->skip) {
        return false;
    }

    if (interrupt_request & CPU_INTERRUPT_RESET) {
        if (cpu_interrupts_enabled(env)) {
            cs->exception_index = EXCP_RESET;
            avr_cpu_do_interrupt(cs);

            cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
            return true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
            int index = ctz32(env->intsrc);
            cs->exception_index = EXCP_INT(index);
            avr_cpu_do_interrupt(cs);

            env->intsrc &= env->intsrc - 1; /* clear the interrupt */
            if (!env->intsrc) {
                cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
            }
            return true;
        }
    }
    return false;
}

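/*
 * Deliver the exception selected in cs->exception_index: push the return
 * address onto the stack (1, 2 or 3 bytes, depending on the device's PC
 * width), jump to the corresponding vector (vectors are 2 words apart on
 * devices with JMP/CALL, 1 word otherwise) and clear the global interrupt
 * flag, SREG.I.
 */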
void avr_cpu_do_interrupt(CPUState *cs)
{
    AVRCPU *cpu = AVR_CPU(cs);
    CPUAVRState *env = &cpu->env;

    uint32_t ret = env->pc_w;
    int vector = 0;
    int size = avr_feature(env, AVR_FEATURE_JMP_CALL) ? 2 : 1;
    int base = 0;

    if (cs->exception_index == EXCP_RESET) {
        vector = 0;
    } else if (env->intsrc != 0) {
        vector = ctz32(env->intsrc) + 1;
    }

    if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
        cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
        cpu_stb_data(env, env->sp--, (ret & 0xff0000) >> 16);
    } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
        cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
    } else {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
    }

    env->pc_w = base + vector * size;
    env->sregI = 0; /* clear Global Interrupt Flag */

    cs->exception_index = -1;
}

hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    return addr; /* assume a 1:1 address correspondence */
}

bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    int prot, page_size = TARGET_PAGE_SIZE;
    uint32_t paddr;

    address &= TARGET_PAGE_MASK;

    if (mmu_idx == MMU_CODE_IDX) {
        /* Access to code in flash. */
        paddr = OFFSET_CODE + address;
        prot = PAGE_READ | PAGE_EXEC;
        if (paddr >= OFFSET_DATA) {
            /*
             * This should not be possible via any architectural operations.
             * There is certainly not an exception that we can deliver.
             * Accept probing that might come from generic code.
             */
            if (probe) {
                return false;
            }
            error_report("execution left flash memory");
            abort();
        }
    } else {
        /* Access to memory. */
        paddr = OFFSET_DATA + address;
        prot = PAGE_READ | PAGE_WRITE;
        if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
            /*
             * Access to CPU registers: exit and rebuild this TB to use
             * full access in case it touches specially handled registers
             * like SREG or SP.  For probing, set page_size = 1, in order
             * to force tlb_fill to be called for the next access.
             */
            if (probe) {
                page_size = 1;
            } else {
                AVRCPU *cpu = AVR_CPU(cs);
                CPUAVRState *env = &cpu->env;
                env->fullacc = 1;
                cpu_loop_exit_restore(cs, retaddr);
            }
        }
    }

    tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
    return true;
}

/*
 * helpers
 */

void helper_sleep(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_unsupported(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    /*
     * I could not find what happens on the real platform, so
     * use EXCP_DEBUG for the time being.
     */
    cs->exception_index = EXCP_DEBUG;
    if (qemu_loglevel_mask(LOG_UNIMP)) {
        qemu_log("UNSUPPORTED\n");
        cpu_dump_state(cs, stderr, 0);
    }
    cpu_loop_exit(cs);
}

void helper_debug(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

void helper_break(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

void helper_wdr(CPUAVRState *env)
{
    qemu_log_mask(LOG_UNIMP, "WDG reset (not implemented)\n");
}

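/*
 * I/O port helpers.  Ports 0x38..0x3f are backed by CPU state rather than
 * by device memory: RAMPD (0x38), RAMPX (0x39), RAMPY (0x3a), RAMPZ (0x3b),
 * EIND (0x3c), SPL (0x3d), SPH (0x3e) and SREG (0x3f).  For example, a guest
 * "in r16, 0x3f" reads SREG through helper_inb(), while "out 0x3d, r16"
 * updates SPL through helper_outb().  Any other port is forwarded to the
 * normal memory address space at OFFSET_IO_REGISTERS + port.
 */
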
/*
 * This function implements the IN instruction.
 *
 * It does the following:
 * a. if an IO register belongs to the CPU, its value is read and returned
 * b. otherwise the IO address is translated to a memory address and physical
 *    memory is read.
 * c. it caches the value for the sake of the SBI, SBIC, SBIS & CBI
 *    implementation
 */
target_ulong helper_inb(CPUAVRState *env, uint32_t port)
{
    target_ulong data = 0;

    switch (port) {
    case 0x38: /* RAMPD */
        data = 0xff & (env->rampD >> 16);
        break;
    case 0x39: /* RAMPX */
        data = 0xff & (env->rampX >> 16);
        break;
    case 0x3a: /* RAMPY */
        data = 0xff & (env->rampY >> 16);
        break;
    case 0x3b: /* RAMPZ */
        data = 0xff & (env->rampZ >> 16);
        break;
    case 0x3c: /* EIND */
        data = 0xff & (env->eind >> 16);
        break;
    case 0x3d: /* SPL */
        data = env->sp & 0x00ff;
        break;
    case 0x3e: /* SPH */
        data = env->sp >> 8;
        break;
    case 0x3f: /* SREG */
        data = cpu_get_sreg(env);
        break;
    default:
        /* not a special register, pass to normal memory access */
        data = address_space_ldub(&address_space_memory,
                                  OFFSET_IO_REGISTERS + port,
                                  MEMTXATTRS_UNSPECIFIED, NULL);
    }

    return data;
}

/*
 * This function implements the OUT instruction.
 *
 * It does the following:
 * a. if an IO register belongs to the CPU, the value is written into that
 *    register
 * b. otherwise the IO address is translated to a memory address and physical
 *    memory is written.
 * c. it caches the value for the sake of the SBI, SBIC, SBIS & CBI
 *    implementation
 */
void helper_outb(CPUAVRState *env, uint32_t port, uint32_t data)
{
    data &= 0x000000ff;

    switch (port) {
    case 0x38: /* RAMPD */
        if (avr_feature(env, AVR_FEATURE_RAMPD)) {
            env->rampD = (data & 0xff) << 16;
        }
        break;
    case 0x39: /* RAMPX */
        if (avr_feature(env, AVR_FEATURE_RAMPX)) {
            env->rampX = (data & 0xff) << 16;
        }
        break;
    case 0x3a: /* RAMPY */
        if (avr_feature(env, AVR_FEATURE_RAMPY)) {
            env->rampY = (data & 0xff) << 16;
        }
        break;
    case 0x3b: /* RAMPZ */
        if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
            env->rampZ = (data & 0xff) << 16;
        }
        break;
    case 0x3c: /* EIND */
        env->eind = (data & 0xff) << 16;
        break;
    case 0x3d: /* SPL */
        env->sp = (env->sp & 0xff00) | (data);
        break;
    case 0x3e: /* SPH */
        if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
            env->sp = (env->sp & 0x00ff) | (data << 8);
        }
        break;
    case 0x3f: /* SREG */
        cpu_set_sreg(env, data);
        break;
    default:
        /* not a special register, pass to normal memory access */
        address_space_stb(&address_space_memory, OFFSET_IO_REGISTERS + port,
                          data, MEMTXATTRS_UNSPECIFIED, NULL);
    }
}

/*
 * This function implements the LD instruction for the case where the access
 * might read a CPU or IO register.
 */
target_ulong helper_fullrd(CPUAVRState *env, uint32_t addr)
{
    uint8_t data;

    env->fullacc = false;

    if (addr < NUMBER_OF_CPU_REGISTERS) {
        /* CPU registers */
        data = env->r[addr];
    } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
        /* IO registers */
        data = helper_inb(env, addr - NUMBER_OF_CPU_REGISTERS);
    } else {
        /* memory */
        data = address_space_ldub(&address_space_memory, OFFSET_DATA + addr,
                                  MEMTXATTRS_UNSPECIFIED, NULL);
    }
    return data;
}

/*
 * This function implements the ST instruction for the case where the access
 * might write a CPU or IO register.
 */
void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
{
    env->fullacc = false;

    /* Following logic assumes this: */
    assert(OFFSET_CPU_REGISTERS == OFFSET_DATA);
    assert(OFFSET_IO_REGISTERS == OFFSET_CPU_REGISTERS +
                                  NUMBER_OF_CPU_REGISTERS);

    if (addr < NUMBER_OF_CPU_REGISTERS) {
        /* CPU registers */
        env->r[addr] = data;
    } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
        /* IO registers */
        helper_outb(env, addr - NUMBER_OF_CPU_REGISTERS, data);
    } else {
        /* memory */
        address_space_stb(&address_space_memory, OFFSET_DATA + addr, data,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    }
}

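/*
 * Note on the full-access path (a summary; translate.c has the
 * authoritative details): ordinary loads and stores use the TCG fast path.
 * When avr_cpu_tlb_fill() sees a data access that may hit the CPU or IO
 * register window, it sets env->fullacc and exits the translation block;
 * the regenerated code then goes through helper_fullrd()/helper_fullwr()
 * above, so registers with side effects such as SREG and SP are handled
 * correctly.
 */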