/*
 * QEMU AVR CPU helpers
 *
 * Copyright (c) 2016-2020 Michael Rolnik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "exec/helper-proto.h"

bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUAVRState *env = cpu_env(cs);

    /*
     * We cannot separate a skip from the next instruction,
     * as the skip would not be preserved across the interrupt.
     * Separating the two insns normally only happens at page boundaries.
     */
    if (env->skip) {
        return false;
    }

    if (interrupt_request & CPU_INTERRUPT_RESET) {
        if (cpu_interrupts_enabled(env)) {
            cs->exception_index = EXCP_RESET;
            avr_cpu_do_interrupt(cs);

            cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
            return true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
            int index = ctz64(env->intsrc);
            cs->exception_index = EXCP_INT(index);
            avr_cpu_do_interrupt(cs);

            env->intsrc &= env->intsrc - 1; /* clear the lowest pending interrupt */
            if (!env->intsrc) {
                cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
            }
            return true;
        }
    }
    return false;
}

void avr_cpu_do_interrupt(CPUState *cs)
{
    CPUAVRState *env = cpu_env(cs);

    uint32_t ret = env->pc_w;
    int vector = 0;
    int size = avr_feature(env, AVR_FEATURE_JMP_CALL) ? 2 : 1;
    int base = 0;

    if (cs->exception_index == EXCP_RESET) {
        vector = 0;
    } else if (env->intsrc != 0) {
        vector = ctz64(env->intsrc) + 1;
    }

    /* Push the return address (a word address), least significant byte first. */
    if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
        cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
        cpu_stb_data(env, env->sp--, (ret & 0xff0000) >> 16);
    } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
        cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
    } else {
        cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
    }

    /*
     * Jump to the vector table entry: each slot is one word, or two words
     * on devices with JMP/CALL.  E.g. with JMP/CALL, vector 1 dispatches
     * to word address 2 (byte address 4).
     */
    env->pc_w = base + vector * size;
    env->sregI = 0; /* clear Global Interrupt Flag */

    cs->exception_index = -1;
}

hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    return addr; /* I assume 1:1 address correspondence */
}
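
/*
 * Address-space layout assumed by avr_cpu_tlb_fill() and the full-access
 * helpers below: code fetches are mapped into flash at OFFSET_CODE, data
 * accesses into the data space at OFFSET_DATA, and the first
 * NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS bytes of the data space
 * are the CPU and I/O registers, which are routed through
 * helper_fullrd()/helper_fullwr().
 */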

bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    int prot, page_size = TARGET_PAGE_SIZE;
    uint32_t paddr;

    address &= TARGET_PAGE_MASK;

    if (mmu_idx == MMU_CODE_IDX) {
        /* Access to code in flash. */
        paddr = OFFSET_CODE + address;
        prot = PAGE_READ | PAGE_EXEC;
        if (paddr >= OFFSET_DATA) {
            /*
             * This should not be possible via any architectural operations.
             * There is certainly not an exception that we can deliver.
             * Accept probing that might come from generic code.
             */
            if (probe) {
                return false;
            }
            error_report("execution left flash memory");
            abort();
        }
    } else {
        /* Access to memory. */
        paddr = OFFSET_DATA + address;
        prot = PAGE_READ | PAGE_WRITE;
        if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
            /*
             * Access to CPU registers: exit and rebuild this TB to use
             * full access, in case it touches specially handled registers
             * like SREG or SP.  For probing, set page_size = 1 in order
             * to force tlb_fill to be called for the next access.
             */
            if (probe) {
                page_size = 1;
            } else {
                cpu_env(cs)->fullacc = 1;
                cpu_loop_exit_restore(cs, retaddr);
            }
        }
    }

    tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
    return true;
}

/*
 * helpers
 */

void helper_sleep(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_unsupported(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    /*
     * I could not find what happens on the real platform, so
     * it's EXCP_DEBUG for now.
     */
    cs->exception_index = EXCP_DEBUG;
    if (qemu_loglevel_mask(LOG_UNIMP)) {
        qemu_log("UNSUPPORTED\n");
        cpu_dump_state(cs, stderr, 0);
    }
    cpu_loop_exit(cs);
}

void helper_debug(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

void helper_break(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

void helper_wdr(CPUAVRState *env)
{
    qemu_log_mask(LOG_UNIMP, "WDR: watchdog reset (not implemented)\n");
}

/*
 * This function implements the IN instruction.
 *
 * It does the following:
 * a. if the I/O register belongs to the CPU, its value is read and returned;
 * b. otherwise the I/O address is translated to a memory address and
 *    physical memory is read;
 * c. it caches the value for the sake of the SBI, SBIC, SBIS & CBI
 *    implementation.
 */
target_ulong helper_inb(CPUAVRState *env, uint32_t port)
{
    target_ulong data = 0;

    switch (port) {
    case 0x38: /* RAMPD */
        data = 0xff & (env->rampD >> 16);
        break;
    case 0x39: /* RAMPX */
        data = 0xff & (env->rampX >> 16);
        break;
    case 0x3a: /* RAMPY */
        data = 0xff & (env->rampY >> 16);
        break;
    case 0x3b: /* RAMPZ */
        data = 0xff & (env->rampZ >> 16);
        break;
    case 0x3c: /* EIND */
        data = 0xff & (env->eind >> 16);
        break;
    case 0x3d: /* SPL */
        data = env->sp & 0x00ff;
        break;
    case 0x3e: /* SPH */
        data = env->sp >> 8;
        break;
    case 0x3f: /* SREG */
        data = cpu_get_sreg(env);
        break;
    default:
        /* not a special register, pass to normal memory access */
        data = address_space_ldub(&address_space_memory,
                                  OFFSET_IO_REGISTERS + port,
                                  MEMTXATTRS_UNSPECIFIED, NULL);
    }

    return data;
}
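
/*
 * For example, the guest instruction "in r24, 0x3f" reaches helper_inb()
 * with port == 0x3f and returns the current SREG value; any port outside
 * 0x38..0x3f is forwarded to whatever device is mapped at
 * OFFSET_IO_REGISTERS + port in the data address space.
 */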

/*
 * This function implements the OUT instruction.
 *
 * It does the following:
 * a. if the I/O register belongs to the CPU, the value is written into
 *    the register;
 * b. otherwise the I/O address is translated to a memory address and
 *    physical memory is written;
 * c. it caches the value for the sake of the SBI, SBIC, SBIS & CBI
 *    implementation.
 */
void helper_outb(CPUAVRState *env, uint32_t port, uint32_t data)
{
    data &= 0x000000ff;

    switch (port) {
    case 0x38: /* RAMPD */
        if (avr_feature(env, AVR_FEATURE_RAMPD)) {
            env->rampD = (data & 0xff) << 16;
        }
        break;
    case 0x39: /* RAMPX */
        if (avr_feature(env, AVR_FEATURE_RAMPX)) {
            env->rampX = (data & 0xff) << 16;
        }
        break;
    case 0x3a: /* RAMPY */
        if (avr_feature(env, AVR_FEATURE_RAMPY)) {
            env->rampY = (data & 0xff) << 16;
        }
        break;
    case 0x3b: /* RAMPZ */
        if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
            env->rampZ = (data & 0xff) << 16;
        }
        break;
    case 0x3c: /* EIND */
        env->eind = (data & 0xff) << 16;
        break;
    case 0x3d: /* SPL */
        env->sp = (env->sp & 0xff00) | (data);
        break;
    case 0x3e: /* SPH */
        if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
            env->sp = (env->sp & 0x00ff) | (data << 8);
        }
        break;
    case 0x3f: /* SREG */
        cpu_set_sreg(env, data);
        break;
    default:
        /* not a special register, pass to normal memory access */
        address_space_stb(&address_space_memory, OFFSET_IO_REGISTERS + port,
                          data, MEMTXATTRS_UNSPECIFIED, NULL);
    }
}

/*
 * This function implements the LD instruction for the case where the
 * address may refer to a CPU or I/O register.
 */
target_ulong helper_fullrd(CPUAVRState *env, uint32_t addr)
{
    uint8_t data;

    env->fullacc = false;

    if (addr < NUMBER_OF_CPU_REGISTERS) {
        /* CPU registers */
        data = env->r[addr];
    } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
        /* IO registers */
        data = helper_inb(env, addr - NUMBER_OF_CPU_REGISTERS);
    } else {
        /* memory */
        data = address_space_ldub(&address_space_memory, OFFSET_DATA + addr,
                                  MEMTXATTRS_UNSPECIFIED, NULL);
    }
    return data;
}

/*
 * This function implements the ST instruction for the case where the
 * address may refer to a CPU or I/O register.
 */
void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
{
    env->fullacc = false;

    /* Following logic assumes this: */
    assert(OFFSET_CPU_REGISTERS == OFFSET_DATA);
    assert(OFFSET_IO_REGISTERS == OFFSET_CPU_REGISTERS +
                                  NUMBER_OF_CPU_REGISTERS);

    if (addr < NUMBER_OF_CPU_REGISTERS) {
        /* CPU registers */
        env->r[addr] = data;
    } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
        /* IO registers */
        helper_outb(env, addr - NUMBER_OF_CPU_REGISTERS, data);
    } else {
        /* memory */
        address_space_stb(&address_space_memory, OFFSET_DATA + addr, data,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    }
}