/*
 * QEMU AVR CPU helpers
 *
 * Copyright (c) 2016-2020 Michael Rolnik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/address-spaces.h"
#include "exec/helper-proto.h"

bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AVRCPU *cpu = AVR_CPU(cs);
    CPUAVRState *env = &cpu->env;

    /*
     * We cannot separate a skip from the next instruction,
     * as the skip would not be preserved across the interrupt.
     * Separating the two insns normally only happens at page boundaries.
     */
    if (env->skip) {
        return false;
    }

    if (interrupt_request & CPU_INTERRUPT_RESET) {
        if (cpu_interrupts_enabled(env)) {
            cs->exception_index = EXCP_RESET;
            avr_cpu_do_interrupt(cs);

            cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
            return true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
            int index = ctz32(env->intsrc);
            cs->exception_index = EXCP_INT(index);
            avr_cpu_do_interrupt(cs);

            env->intsrc &= env->intsrc - 1; /* clear the interrupt */
            if (!env->intsrc) {
                cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
            }
            return true;
        }
    }
    return false;
}

void avr_cpu_do_interrupt(CPUState *cs)
{
    AVRCPU *cpu = AVR_CPU(cs);
    CPUAVRState *env = &cpu->env;

    uint32_t ret = env->pc_w;
    int vector = 0;
    int size = avr_feature(env, AVR_FEATURE_JMP_CALL) ? 2 : 1;
    int base = 0;

    if (cs->exception_index == EXCP_RESET) {
        vector = 0;
    } else if (env->intsrc != 0) {
        vector = ctz32(env->intsrc) + 1;
    }

    /* Push the return address onto the stack, least significant byte first. */
    if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
        cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
        cpu_stb_data(env, env->sp--, (ret & 0xff0000) >> 16);
    } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
        cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
    } else {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
    }

    env->pc_w = base + vector * size;
    env->sregI = 0; /* clear Global Interrupt Flag */

    cs->exception_index = -1;
}
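
/*
 * Rough worked example of the dispatch above: on a part with the JMP_CALL
 * feature each vector slot is two words, so pending interrupt source n
 * (the lowest set bit of intsrc) uses vector n + 1 and jumps to word
 * address (n + 1) * 2, while reset always jumps to word address 0.
 */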

hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    return addr; /* I assume 1:1 address correspondence */
}

bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    int prot, page_size = TARGET_PAGE_SIZE;
    uint32_t paddr;

    address &= TARGET_PAGE_MASK;

    if (mmu_idx == MMU_CODE_IDX) {
        /* Access to code in flash. */
        paddr = OFFSET_CODE + address;
        prot = PAGE_READ | PAGE_EXEC;
        if (paddr >= OFFSET_DATA) {
            /*
             * This should not be possible via any architectural operations.
             * There is certainly not an exception that we can deliver.
             * Accept probing that might come from generic code.
             */
            if (probe) {
                return false;
            }
            error_report("execution left flash memory");
            abort();
        }
    } else {
        /* Access to memory. */
        paddr = OFFSET_DATA + address;
        prot = PAGE_READ | PAGE_WRITE;
        if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
            /*
             * Access to CPU registers, exit and rebuild this TB to use
             * full access in case it touches specially handled registers
             * like SREG or SP. For probing, set page_size = 1, in order
             * to force tlb_fill to be called for the next access.
             */
            if (probe) {
                page_size = 1;
            } else {
                AVRCPU *cpu = AVR_CPU(cs);
                CPUAVRState *env = &cpu->env;
                env->fullacc = 1;
                cpu_loop_exit_restore(cs, retaddr);
            }
        }
    }

    tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
    return true;
}

/*
 * helpers
 */

void helper_sleep(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_unsupported(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    /*
     * I could not find what happens on the real platform, so
     * it's EXCP_DEBUG for the time being.
     */
    cs->exception_index = EXCP_DEBUG;
    if (qemu_loglevel_mask(LOG_UNIMP)) {
        qemu_log("UNSUPPORTED\n");
        cpu_dump_state(cs, stderr, 0);
    }
    cpu_loop_exit(cs);
}

void helper_debug(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

void helper_break(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

void helper_wdr(CPUAVRState *env)
{
    qemu_log_mask(LOG_UNIMP, "WDG reset (not implemented)\n");
}

/*
 * This function implements the IN instruction.
 *
 * It does the following:
 * a. if an IO register belongs to the CPU, its value is read and returned
 * b. otherwise the io address is translated to a mem address and physical
 *    memory is read
 * c. it caches the value for the sake of the SBI, SBIC, SBIS & CBI
 *    implementation
 */
target_ulong helper_inb(CPUAVRState *env, uint32_t port)
{
    target_ulong data = 0;

    switch (port) {
    case 0x38: /* RAMPD */
        data = 0xff & (env->rampD >> 16);
        break;
    case 0x39: /* RAMPX */
        data = 0xff & (env->rampX >> 16);
        break;
    case 0x3a: /* RAMPY */
        data = 0xff & (env->rampY >> 16);
        break;
    case 0x3b: /* RAMPZ */
        data = 0xff & (env->rampZ >> 16);
        break;
    case 0x3c: /* EIND */
        data = 0xff & (env->eind >> 16);
        break;
    case 0x3d: /* SPL */
        data = env->sp & 0x00ff;
        break;
    case 0x3e: /* SPH */
        data = env->sp >> 8;
        break;
    case 0x3f: /* SREG */
        data = cpu_get_sreg(env);
        break;
    default:
        /* not a special register, pass to normal memory access */
        data = address_space_ldub(&address_space_memory,
                                  OFFSET_IO_REGISTERS + port,
                                  MEMTXATTRS_UNSPECIFIED, NULL);
    }

    return data;
}
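
/*
 * Sketch of the port mapping above, assuming the classic AVR I/O layout:
 * "in r24, 0x3d" reaches this helper with port = 0x3d and returns the low
 * byte of env->sp directly, whereas a port without a special case above
 * (e.g. 0x00) is read from guest memory at OFFSET_IO_REGISTERS + port.
 */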

/*
 * This function implements the OUT instruction.
 *
 * It does the following:
 * a. if an IO register belongs to the CPU, the data is written into the
 *    register
 * b. otherwise the io address is translated to a mem address and physical
 *    memory is written
 * c. it caches the value for the sake of the SBI, SBIC, SBIS & CBI
 *    implementation
 */
void helper_outb(CPUAVRState *env, uint32_t port, uint32_t data)
{
    data &= 0x000000ff;

    switch (port) {
    case 0x38: /* RAMPD */
        if (avr_feature(env, AVR_FEATURE_RAMPD)) {
            env->rampD = (data & 0xff) << 16;
        }
        break;
    case 0x39: /* RAMPX */
        if (avr_feature(env, AVR_FEATURE_RAMPX)) {
            env->rampX = (data & 0xff) << 16;
        }
        break;
    case 0x3a: /* RAMPY */
        if (avr_feature(env, AVR_FEATURE_RAMPY)) {
            env->rampY = (data & 0xff) << 16;
        }
        break;
    case 0x3b: /* RAMPZ */
        if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
            env->rampZ = (data & 0xff) << 16;
        }
        break;
    case 0x3c: /* EIND */
        env->eind = (data & 0xff) << 16;
        break;
    case 0x3d: /* SPL */
        env->sp = (env->sp & 0xff00) | (data);
        break;
    case 0x3e: /* SPH */
        if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
            env->sp = (env->sp & 0x00ff) | (data << 8);
        }
        break;
    case 0x3f: /* SREG */
        cpu_set_sreg(env, data);
        break;
    default:
        /* not a special register, pass to normal memory access */
        address_space_stb(&address_space_memory, OFFSET_IO_REGISTERS + port,
                          data, MEMTXATTRS_UNSPECIFIED, NULL);
    }
}

/*
 * This function implements the LD instruction when there is a possibility
 * to read from a CPU register.
 */
target_ulong helper_fullrd(CPUAVRState *env, uint32_t addr)
{
    uint8_t data;

    env->fullacc = false;

    if (addr < NUMBER_OF_CPU_REGISTERS) {
        /* CPU registers */
        data = env->r[addr];
    } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
        /* IO registers */
        data = helper_inb(env, addr - NUMBER_OF_CPU_REGISTERS);
    } else {
        /* memory */
        data = address_space_ldub(&address_space_memory, OFFSET_DATA + addr,
                                  MEMTXATTRS_UNSPECIFIED, NULL);
    }
    return data;
}

/*
 * This function implements the ST instruction when there is a possibility
 * to write into a CPU register.
 */
void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
{
    env->fullacc = false;

    /* The following logic assumes this: */
    assert(OFFSET_CPU_REGISTERS == OFFSET_DATA);
    assert(OFFSET_IO_REGISTERS == OFFSET_CPU_REGISTERS +
                                  NUMBER_OF_CPU_REGISTERS);

    if (addr < NUMBER_OF_CPU_REGISTERS) {
        /* CPU registers */
        env->r[addr] = data;
    } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
        /* IO registers */
        helper_outb(env, addr - NUMBER_OF_CPU_REGISTERS, data);
    } else {
        /* memory */
        address_space_stb(&address_space_memory, OFFSET_DATA + addr, data,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    }
}
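
/*
 * Sketch of the full-access path, assuming the usual data-space layout of
 * 32 CPU registers followed by 64 IO registers: avr_cpu_tlb_fill() sets
 * env->fullacc for data accesses in that low range, so an ST to address
 * 0x5d ends up in helper_fullwr() and is forwarded to helper_outb() with
 * port 0x3d (SPL), while an ST to 0x0100 or above is a plain memory write.
 */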