1 /*
2 * Helpers for loads and stores
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/range.h"
23 #include "cpu.h"
24 #include "tcg/tcg.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "exec/cputlb.h"
28 #include "exec/page-protection.h"
29 #include "exec/cpu_ldst.h"
30 #ifdef CONFIG_USER_ONLY
31 #include "user/page-protection.h"
32 #endif
33 #include "asi.h"
34
35 //#define DEBUG_MMU
36 //#define DEBUG_MXCC
37 //#define DEBUG_UNASSIGNED
38 //#define DEBUG_ASI
39 //#define DEBUG_CACHE_CONTROL
40
41 #ifdef DEBUG_MMU
42 #define DPRINTF_MMU(fmt, ...) \
43 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
44 #else
45 #define DPRINTF_MMU(fmt, ...) do {} while (0)
46 #endif
47
48 #ifdef DEBUG_MXCC
49 #define DPRINTF_MXCC(fmt, ...) \
50 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
51 #else
52 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
53 #endif
54
55 #ifdef DEBUG_ASI
56 #define DPRINTF_ASI(fmt, ...) \
57 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
58 #endif
59
60 #ifdef DEBUG_CACHE_CONTROL
61 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
62 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
63 #else
64 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
65 #endif
66
67 #ifdef TARGET_SPARC64
68 #ifndef TARGET_ABI32
69 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
70 #else
71 #define AM_CHECK(env1) (1)
72 #endif
73 #endif
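/*
 * With AM_CHECK() true (PSTATE.AM set, or always for the 32-bit ABI),
 * address_mask() below truncates virtual addresses to 32 bits, e.g.
 * 0xfffff80012345678 becomes 0x0000000012345678.
 */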
74
75 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
76 /* Calculates TSB pointer value for fault page size
77 * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers
78 * UA2005 holds the page size configuration in mmu_ctx registers */
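/*
 * For example (sun4u case, no split, tsb_size == 0): with the TSB base
 * register at 0x10000000 and an 8k tag access value of 0x4000 (VA page
 * index 2, context 0), the pointer becomes
 * 0x10000000 | (0x4000 >> 9) = 0x10000020, i.e. the 16-byte TTE slot at
 * index 2.
 */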
79 static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env,
80 const SparcV9MMU *mmu, const int idx)
81 {
82 uint64_t tsb_register;
83 int page_size;
84 if (cpu_has_hypervisor(env)) {
85 int tsb_index = 0;
86 int ctx = mmu->tag_access & 0x1fffULL;
87 uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0];
88 tsb_index = idx;
89 tsb_index |= ctx ? 2 : 0;
90 page_size = idx ? ctx_register >> 8 : ctx_register;
91 page_size &= 7;
92 tsb_register = mmu->sun4v_tsb_pointers[tsb_index];
93 } else {
94 page_size = idx;
95 tsb_register = mmu->tsb;
96 }
97 int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
98 int tsb_size = tsb_register & 0xf;
99
100 uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size;
101
102 /* move va bits to correct position,
103 * the context bits will be masked out later */
104 uint64_t va = mmu->tag_access >> (3 * page_size + 9);
105
106 /* calculate tsb_base mask and adjust va if split is in use */
107 if (tsb_split) {
108 if (idx == 0) {
109 va &= ~(1ULL << (13 + tsb_size));
110 } else {
111 va |= (1ULL << (13 + tsb_size));
112 }
113 tsb_base_mask <<= 1;
114 }
115
116 return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
117 }
118
119 /* Calculates tag target register value by reordering bits
120 in tag access register */
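/*
 * E.g. the 13-bit context from tag access bits 12:0 lands in bits 60:48
 * of the result, and VA[63:22] lands in bits 41:0, matching the
 * UltraSPARC TSB Tag Target layout.
 */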
121 static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
122 {
123 return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
124 }
125
126 static void replace_tlb_entry(SparcTLBEntry *tlb,
127 uint64_t tlb_tag, uint64_t tlb_tte,
128 CPUSPARCState *env)
129 {
130 target_ulong mask, size, va, offset;
131
132 /* flush page range if translation is valid */
133 if (TTE_IS_VALID(tlb->tte)) {
134 CPUState *cs = env_cpu(env);
135
136 size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte);
137 mask = 1ULL + ~size;
138
139 va = tlb->tag & mask;
140
141 for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
142 tlb_flush_page(cs, va + offset);
143 }
144 }
145
146 tlb->tag = tlb_tag;
147 tlb->tte = tlb_tte;
148 }
149
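/*
 * demap_addr follows the MMU demap operation format: bit 6 selects
 * "demap context" (1) vs "demap page" (0), and bits 5:4 select the
 * primary, secondary or nucleus context.
 */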
150 static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
151 const char *strmmu, CPUSPARCState *env1)
152 {
153 unsigned int i;
154 target_ulong mask;
155 uint64_t context;
156
157 int is_demap_context = (demap_addr >> 6) & 1;
158
159 /* demap context */
160 switch ((demap_addr >> 4) & 3) {
161 case 0: /* primary */
162 context = env1->dmmu.mmu_primary_context;
163 break;
164 case 1: /* secondary */
165 context = env1->dmmu.mmu_secondary_context;
166 break;
167 case 2: /* nucleus */
168 context = 0;
169 break;
170 case 3: /* reserved */
171 default:
172 return;
173 }
174
175 for (i = 0; i < 64; i++) {
176 if (TTE_IS_VALID(tlb[i].tte)) {
177
178 if (is_demap_context) {
179 /* will remove non-global entries matching context value */
180 if (TTE_IS_GLOBAL(tlb[i].tte) ||
181 !tlb_compare_context(&tlb[i], context)) {
182 continue;
183 }
184 } else {
185 /* demap page
186 will remove any entry matching VA */
187 mask = 0xffffffffffffe000ULL;
188 mask <<= 3 * ((tlb[i].tte >> 61) & 3);
189
190 if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
191 continue;
192 }
193
194 /* entry should be global or matching context value */
195 if (!TTE_IS_GLOBAL(tlb[i].tte) &&
196 !tlb_compare_context(&tlb[i], context)) {
197 continue;
198 }
199 }
200
201 replace_tlb_entry(&tlb[i], 0, 0, env1);
202 #ifdef DEBUG_MMU
203 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
204 dump_mmu(env1);
205 #endif
206 }
207 }
208 }
209
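/*
 * Convert a sun4v (UA2005) format TTE to the sun4u layout used by the
 * rest of this file. The conversion only happens when the CPU has a
 * hypervisor and the tag access value carries TLB_UST1_IS_SUN4V_BIT;
 * otherwise the TTE is passed through unchanged.
 */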
210 static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag,
211 uint64_t sun4v_tte)
212 {
213 uint64_t sun4u_tte;
214 if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) {
215 /* is already in the sun4u format */
216 return sun4v_tte;
217 }
218 sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT);
219 sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */
220 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT);
221 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT);
222 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT);
223 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005,
224 TTE_SIDEEFFECT_BIT);
225 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT);
226 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT);
227 return sun4u_tte;
228 }
229
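/*
 * Insert a TLB entry using a 1-bit LRU policy: overlapping sun4v
 * mappings are auto-demapped first, then the first invalid entry is
 * reused, then an unlocked unused entry, then (after clearing all used
 * bits) any unlocked entry; as a last resort entry 63 is overwritten.
 */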
230 static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
231 uint64_t tlb_tag, uint64_t tlb_tte,
232 const char *strmmu, CPUSPARCState *env1,
233 uint64_t addr)
234 {
235 unsigned int i, replace_used;
236
237 tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte);
238 if (cpu_has_hypervisor(env1)) {
239 uint64_t new_vaddr = tlb_tag & ~0x1fffULL;
240 uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte);
241 uint32_t new_ctx = tlb_tag & 0x1fffU;
242 for (i = 0; i < 64; i++) {
243 uint32_t ctx = tlb[i].tag & 0x1fffU;
244 /* check if new mapping overlaps an existing one */
245 if (new_ctx == ctx) {
246 uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
247 uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte);
248 if (ranges_overlap(new_vaddr, new_size, vaddr, size)) {
249 DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
250 new_vaddr);
251 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
252 return;
253 }
254 }
255
256 }
257 }
258 /* Try replacing invalid entry */
259 for (i = 0; i < 64; i++) {
260 if (!TTE_IS_VALID(tlb[i].tte)) {
261 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
262 #ifdef DEBUG_MMU
263 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
264 dump_mmu(env1);
265 #endif
266 return;
267 }
268 }
269
270 /* All entries are valid, try replacing unlocked entry */
271
272 for (replace_used = 0; replace_used < 2; ++replace_used) {
273
274 /* Used entries are not replaced on first pass */
275
276 for (i = 0; i < 64; i++) {
277 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
278
279 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
280 #ifdef DEBUG_MMU
281 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
282 strmmu, (replace_used ? "used" : "unused"), i);
283 dump_mmu(env1);
284 #endif
285 return;
286 }
287 }
288
289 /* Now reset used bit and search for unused entries again */
290
291 for (i = 0; i < 64; i++) {
292 TTE_SET_UNUSED(tlb[i].tte);
293 }
294 }
295
296 #ifdef DEBUG_MMU
297 DPRINTF_MMU("%s lru replacement: no free entries available, "
298 "replacing the last one\n", strmmu);
299 #endif
300 /* corner case: the last entry is replaced anyway */
301 replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1);
302 }
303
304 #endif
305
306 #ifdef TARGET_SPARC64
307 /* Returns true if an access using this ASI has its address translated by the
308 MMU; otherwise the access uses the raw physical address. */
309 /* TODO: check sparc32 bits */
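/*
 * For example, ASI_P (0x80) and the other user ASIs at 0x80-0xff are
 * translated, while the physical/bypass ASIs (e.g. 0x14 and 0x15 on
 * UltraSPARC) fall outside these ranges and take raw physical addresses.
 */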
310 static inline int is_translating_asi(int asi)
311 {
312 /* Ultrasparc IIi translating asi
313 - note this list is defined by cpu implementation
314 */
315 switch (asi) {
316 case 0x04 ... 0x11:
317 case 0x16 ... 0x19:
318 case 0x1E ... 0x1F:
319 case 0x24 ... 0x2C:
320 case 0x70 ... 0x73:
321 case 0x78 ... 0x79:
322 case 0x80 ... 0xFF:
323 return 1;
324
325 default:
326 return 0;
327 }
328 }
329
330 static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
331 {
332 if (AM_CHECK(env1)) {
333 addr &= 0xffffffffULL;
334 }
335 return addr;
336 }
337
338 static inline target_ulong asi_address_mask(CPUSPARCState *env,
339 int asi, target_ulong addr)
340 {
341 if (is_translating_asi(asi)) {
342 addr = address_mask(env, addr);
343 }
344 return addr;
345 }
346
347 #ifndef CONFIG_USER_ONLY
348 static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra)
349 {
350 /* ASIs >= 0x80 are user mode.
351 * ASIs >= 0x30 are hyper mode (or super if hyper is not available).
352 * ASIs <= 0x2f are super mode.
353 */
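/*
 * For example, a user-mode access with ASI_N (0x04) raises TT_PRIV_ACT
 * below, while ASI_P (0x80) is allowed from any privilege level.
 */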
354 if (asi < 0x80
355 && !cpu_hypervisor_mode(env)
356 && (!cpu_supervisor_mode(env)
357 || (asi >= 0x30 && cpu_has_hypervisor(env)))) {
358 cpu_raise_exception_ra(env, TT_PRIV_ACT, ra);
359 }
360 }
361 #endif /* !CONFIG_USER_ONLY */
362 #endif
363
364 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
365 static void do_check_align(CPUSPARCState *env, target_ulong addr,
366 uint32_t align, uintptr_t ra)
367 {
368 if (addr & align) {
369 cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
370 }
371 }
372 #endif
373
374 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
375 defined(DEBUG_MXCC)
376 static void dump_mxcc(CPUSPARCState *env)
377 {
378 printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
379 "\n",
380 env->mxccdata[0], env->mxccdata[1],
381 env->mxccdata[2], env->mxccdata[3]);
382 printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
383 "\n"
384 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
385 "\n",
386 env->mxccregs[0], env->mxccregs[1],
387 env->mxccregs[2], env->mxccregs[3],
388 env->mxccregs[4], env->mxccregs[5],
389 env->mxccregs[6], env->mxccregs[7]);
390 }
391 #endif
392
393 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
394 && defined(DEBUG_ASI)
395 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
396 uint64_t r1)
397 {
398 switch (size) {
399 case 1:
400 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
401 addr, asi, r1 & 0xff);
402 break;
403 case 2:
404 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
405 addr, asi, r1 & 0xffff);
406 break;
407 case 4:
408 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
409 addr, asi, r1 & 0xffffffff);
410 break;
411 case 8:
412 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
413 addr, asi, r1);
414 break;
415 }
416 }
417 #endif
418
419 #ifndef CONFIG_USER_ONLY
420 #ifndef TARGET_SPARC64
421 static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr,
422 bool is_write, bool is_exec, int is_asi,
423 unsigned size, uintptr_t retaddr)
424 {
425 CPUSPARCState *env = cpu_env(cs);
426 int fault_type;
427
428 #ifdef DEBUG_UNASSIGNED
429 if (is_asi) {
430 printf("Unassigned mem %s access of %d byte%s to " HWADDR_FMT_plx
431 " asi 0x%02x from " TARGET_FMT_lx "\n",
432 is_exec ? "exec" : is_write ? "write" : "read", size,
433 size == 1 ? "" : "s", addr, is_asi, env->pc);
434 } else {
435 printf("Unassigned mem %s access of %d byte%s to " HWADDR_FMT_plx
436 " from " TARGET_FMT_lx "\n",
437 is_exec ? "exec" : is_write ? "write" : "read", size,
438 size == 1 ? "" : "s", addr, env->pc);
439 }
440 #endif
441 /* Don't overwrite translation and access faults */
442 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
443 if ((fault_type > 4) || (fault_type == 0)) {
444 env->mmuregs[3] = 0; /* Fault status register */
445 if (is_asi) {
446 env->mmuregs[3] |= 1 << 16;
447 }
448 if (env->psrs) {
449 env->mmuregs[3] |= 1 << 5;
450 }
451 if (is_exec) {
452 env->mmuregs[3] |= 1 << 6;
453 }
454 if (is_write) {
455 env->mmuregs[3] |= 1 << 7;
456 }
457 env->mmuregs[3] |= (5 << 2) | 2;
458 /* SuperSPARC will never place instruction fault addresses in the FAR */
459 if (!is_exec) {
460 env->mmuregs[4] = addr; /* Fault address register */
461 }
462 }
463 /* overflow (same type fault was not read before another fault) */
464 if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
465 env->mmuregs[3] |= 1;
466 }
467
468 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
469 int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
470 cpu_raise_exception_ra(env, tt, retaddr);
471 }
472
473 /*
474 * flush neverland mappings created during no-fault mode,
475 * so that subsequent MMU faults report proper fault types
476 */
477 if (env->mmuregs[0] & MMU_NF) {
478 tlb_flush(cs);
479 }
480 }
481 #else
482 static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr,
483 bool is_write, bool is_exec, int is_asi,
484 unsigned size, uintptr_t retaddr)
485 {
486 CPUSPARCState *env = cpu_env(cs);
487
488 #ifdef DEBUG_UNASSIGNED
489 printf("Unassigned mem access to " HWADDR_FMT_plx " from " TARGET_FMT_lx
490 "\n", addr, env->pc);
491 #endif
492
493 if (is_exec) { /* XXX has_hypervisor */
494 if (env->lsu & (IMMU_E)) {
495 cpu_raise_exception_ra(env, TT_CODE_ACCESS, retaddr);
496 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
497 cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, retaddr);
498 }
499 } else {
500 if (env->lsu & (DMMU_E)) {
501 cpu_raise_exception_ra(env, TT_DATA_ACCESS, retaddr);
502 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
503 cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, retaddr);
504 }
505 }
506 }
507 #endif
508 #endif
509
510 #ifndef TARGET_SPARC64
511 #ifndef CONFIG_USER_ONLY
512
513
514 /* Leon3 cache control */
515
516 static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
517 uint64_t val, int size)
518 {
519 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
520 addr, val, size);
521
522 if (size != 4) {
523 DPRINTF_CACHE_CONTROL("32bits only\n");
524 return;
525 }
526
527 switch (addr) {
528 case 0x00: /* Cache control */
529
530 /* These values must always be read as zeros */
531 val &= ~CACHE_CTRL_FD;
532 val &= ~CACHE_CTRL_FI;
533 val &= ~CACHE_CTRL_IB;
534 val &= ~CACHE_CTRL_IP;
535 val &= ~CACHE_CTRL_DP;
536
537 env->cache_control = val;
538 break;
539 case 0x04: /* Instruction cache configuration */
540 case 0x08: /* Data cache configuration */
541 /* Read Only */
542 break;
543 default:
544 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
545 break;
546 }
547 }
548
549 static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
550 int size)
551 {
552 uint64_t ret = 0;
553
554 if (size != 4) {
555 DPRINTF_CACHE_CONTROL("32bits only\n");
556 return 0;
557 }
558
559 switch (addr) {
560 case 0x00: /* Cache control */
561 ret = env->cache_control;
562 break;
563
564 /* Configuration registers are read-only and always keep their
565 predefined values */
566
567 case 0x04: /* Instruction cache configuration */
568 ret = 0x10220000;
569 break;
570 case 0x08: /* Data cache configuration */
571 ret = 0x18220000;
572 break;
573 default:
574 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
575 break;
576 }
577 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
578 addr, ret, size);
579 return ret;
580 }
581
582 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
583 int asi, uint32_t memop)
584 {
585 int size = 1 << (memop & MO_SIZE);
586 int sign = memop & MO_SIGN;
587 CPUState *cs = env_cpu(env);
588 uint64_t ret = 0;
589 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
590 uint32_t last_addr = addr;
591 #endif
592
593 do_check_align(env, addr, size - 1, GETPC());
594 switch (asi) {
595 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
596 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
597 switch (addr) {
598 case 0x00: /* Leon3 Cache Control */
599 case 0x08: /* Leon3 Instruction Cache config */
600 case 0x0C: /* Leon3 Data Cache config */
601 if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
602 ret = leon3_cache_control_ld(env, addr, size);
603 } else {
604 qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
605 " address, size: %d\n", addr, size);
606 }
607 break;
608 case 0x01c00a00: /* MXCC control register */
609 if (size == 8) {
610 ret = env->mxccregs[3];
611 } else {
612 qemu_log_mask(LOG_UNIMP,
613 "%08x: unimplemented access size: %d\n", addr,
614 size);
615 }
616 break;
617 case 0x01c00a04: /* MXCC control register */
618 if (size == 4) {
619 ret = env->mxccregs[3];
620 } else {
621 qemu_log_mask(LOG_UNIMP,
622 "%08x: unimplemented access size: %d\n", addr,
623 size);
624 }
625 break;
626 case 0x01c00c00: /* Module reset register */
627 if (size == 8) {
628 ret = env->mxccregs[5];
629 /* should we do something here? */
630 } else {
631 qemu_log_mask(LOG_UNIMP,
632 "%08x: unimplemented access size: %d\n", addr,
633 size);
634 }
635 break;
636 case 0x01c00f00: /* MBus port address register */
637 if (size == 8) {
638 ret = env->mxccregs[7];
639 } else {
640 qemu_log_mask(LOG_UNIMP,
641 "%08x: unimplemented access size: %d\n", addr,
642 size);
643 }
644 break;
645 default:
646 qemu_log_mask(LOG_UNIMP,
647 "%08x: unimplemented address, size: %d\n", addr,
648 size);
649 break;
650 }
651 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
652 "addr = %08x -> ret = %" PRIx64 ","
653 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
654 #ifdef DEBUG_MXCC
655 dump_mxcc(env);
656 #endif
657 break;
658 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
659 case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
660 {
661 int mmulev;
662
663 mmulev = (addr >> 8) & 15;
664 if (mmulev > 4) {
665 ret = 0;
666 } else {
667 ret = mmu_probe(env, addr, mmulev);
668 }
669 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
670 addr, mmulev, ret);
671 }
672 break;
673 case ASI_M_MMUREGS: /* SuperSparc MMU regs */
674 case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
675 {
676 int reg = (addr >> 8) & 0x1f;
677
678 ret = env->mmuregs[reg];
679 if (reg == 3) { /* Fault status cleared on read */
680 env->mmuregs[3] = 0;
681 } else if (reg == 0x13) { /* Fault status read */
682 ret = env->mmuregs[3];
683 } else if (reg == 0x14) { /* Fault address read */
684 ret = env->mmuregs[4];
685 }
686 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
687 }
688 break;
689 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
690 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
691 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
692 break;
693 case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
694 case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
695 case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
696 case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
697 break;
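/* For example, an access with asi 0x2c and addr 0x1000 is directed to
 * the 36-bit physical address 0xc00001000. */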
698 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
699 {
700 MemTxResult result;
701 hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32);
702
703 switch (size) {
704 case 1:
705 ret = address_space_ldub(cs->as, access_addr,
706 MEMTXATTRS_UNSPECIFIED, &result);
707 break;
708 case 2:
709 ret = address_space_lduw(cs->as, access_addr,
710 MEMTXATTRS_UNSPECIFIED, &result);
711 break;
712 default:
713 case 4:
714 ret = address_space_ldl(cs->as, access_addr,
715 MEMTXATTRS_UNSPECIFIED, &result);
716 break;
717 case 8:
718 ret = address_space_ldq(cs->as, access_addr,
719 MEMTXATTRS_UNSPECIFIED, &result);
720 break;
721 }
722
723 if (result != MEMTX_OK) {
724 sparc_raise_mmu_fault(cs, access_addr, false, false, false,
725 size, GETPC());
726 }
727 break;
728 }
729 case 0x30: /* Turbosparc secondary cache diagnostic */
730 case 0x31: /* Turbosparc RAM snoop */
731 case 0x32: /* Turbosparc page table descriptor diagnostic */
732 case 0x39: /* data cache diagnostic register */
733 ret = 0;
734 break;
735 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
736 {
737 int reg = (addr >> 8) & 3;
738
739 switch (reg) {
740 case 0: /* Breakpoint Value (Addr) */
741 ret = env->mmubpregs[reg];
742 break;
743 case 1: /* Breakpoint Mask */
744 ret = env->mmubpregs[reg];
745 break;
746 case 2: /* Breakpoint Control */
747 ret = env->mmubpregs[reg];
748 break;
749 case 3: /* Breakpoint Status */
750 ret = env->mmubpregs[reg];
751 env->mmubpregs[reg] = 0ULL;
752 break;
753 }
754 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
755 ret);
756 }
757 break;
758 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
759 ret = env->mmubpctrv;
760 break;
761 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
762 ret = env->mmubpctrc;
763 break;
764 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
765 ret = env->mmubpctrs;
766 break;
767 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
768 ret = env->mmubpaction;
769 break;
770 default:
771 sparc_raise_mmu_fault(cs, addr, false, false, asi, size, GETPC());
772 ret = 0;
773 break;
774
775 case ASI_USERDATA: /* User data access */
776 case ASI_KERNELDATA: /* Supervisor data access */
777 case ASI_USERTXT: /* User code access */
778 case ASI_KERNELTXT: /* Supervisor code access */
779 case ASI_P: /* Implicit primary context data access (v9 only?) */
780 case ASI_M_BYPASS: /* MMU passthrough */
781 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
782 /* These are always handled inline. */
783 g_assert_not_reached();
784 }
785 if (sign) {
786 switch (size) {
787 case 1:
788 ret = (int8_t) ret;
789 break;
790 case 2:
791 ret = (int16_t) ret;
792 break;
793 case 4:
794 ret = (int32_t) ret;
795 break;
796 default:
797 break;
798 }
799 }
800 #ifdef DEBUG_ASI
801 dump_asi("read ", last_addr, asi, size, ret);
802 #endif
803 return ret;
804 }
805
806 void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
807 int asi, uint32_t memop)
808 {
809 int size = 1 << (memop & MO_SIZE);
810 CPUState *cs = env_cpu(env);
811
812 do_check_align(env, addr, size - 1, GETPC());
813 switch (asi) {
814 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
815 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
816 switch (addr) {
817 case 0x00: /* Leon3 Cache Control */
818 case 0x08: /* Leon3 Instruction Cache config */
819 case 0x0C: /* Leon3 Data Cache config */
820 if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
821 leon3_cache_control_st(env, addr, val, size);
822 } else {
823 qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
824 " address, size: %d\n", addr, size);
825 }
826 break;
827
828 case 0x01c00000: /* MXCC stream data register 0 */
829 if (size == 8) {
830 env->mxccdata[0] = val;
831 } else {
832 qemu_log_mask(LOG_UNIMP,
833 "%08x: unimplemented access size: %d\n", addr,
834 size);
835 }
836 break;
837 case 0x01c00008: /* MXCC stream data register 1 */
838 if (size == 8) {
839 env->mxccdata[1] = val;
840 } else {
841 qemu_log_mask(LOG_UNIMP,
842 "%08x: unimplemented access size: %d\n", addr,
843 size);
844 }
845 break;
846 case 0x01c00010: /* MXCC stream data register 2 */
847 if (size == 8) {
848 env->mxccdata[2] = val;
849 } else {
850 qemu_log_mask(LOG_UNIMP,
851 "%08x: unimplemented access size: %d\n", addr,
852 size);
853 }
854 break;
855 case 0x01c00018: /* MXCC stream data register 3 */
856 if (size == 8) {
857 env->mxccdata[3] = val;
858 } else {
859 qemu_log_mask(LOG_UNIMP,
860 "%08x: unimplemented access size: %d\n", addr,
861 size);
862 }
863 break;
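/* The stream source/destination registers below implement a 32-byte block
 * copy: writing a source address fills mxccdata[0..3] from memory, and
 * writing a destination address stores mxccdata[0..3] back to memory. */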
864 case 0x01c00100: /* MXCC stream source */
865 {
866 int i;
867
868 if (size == 8) {
869 env->mxccregs[0] = val;
870 } else {
871 qemu_log_mask(LOG_UNIMP,
872 "%08x: unimplemented access size: %d\n", addr,
873 size);
874 }
875
876 for (i = 0; i < 4; i++) {
877 MemTxResult result;
878 hwaddr access_addr = (env->mxccregs[0] & 0xffffffffULL) + 8 * i;
879
880 env->mxccdata[i] = address_space_ldq(cs->as,
881 access_addr,
882 MEMTXATTRS_UNSPECIFIED,
883 &result);
884 if (result != MEMTX_OK) {
885 /* TODO: investigate whether this is the right behaviour */
886 sparc_raise_mmu_fault(cs, access_addr, false, false,
887 false, size, GETPC());
888 }
889 }
890 break;
891 }
892 case 0x01c00200: /* MXCC stream destination */
893 {
894 int i;
895
896 if (size == 8) {
897 env->mxccregs[1] = val;
898 } else {
899 qemu_log_mask(LOG_UNIMP,
900 "%08x: unimplemented access size: %d\n", addr,
901 size);
902 }
903
904 for (i = 0; i < 4; i++) {
905 MemTxResult result;
906 hwaddr access_addr = (env->mxccregs[1] & 0xffffffffULL) + 8 * i;
907
908 address_space_stq(cs->as, access_addr, env->mxccdata[i],
909 MEMTXATTRS_UNSPECIFIED, &result);
910
911 if (result != MEMTX_OK) {
912 /* TODO: investigate whether this is the right behaviour */
913 sparc_raise_mmu_fault(cs, access_addr, true, false,
914 false, size, GETPC());
915 }
916 }
917 break;
918 }
919 case 0x01c00a00: /* MXCC control register */
920 if (size == 8) {
921 env->mxccregs[3] = val;
922 } else {
923 qemu_log_mask(LOG_UNIMP,
924 "%08x: unimplemented access size: %d\n", addr,
925 size);
926 }
927 break;
928 case 0x01c00a04: /* MXCC control register */
929 if (size == 4) {
930 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
931 | val;
932 } else {
933 qemu_log_mask(LOG_UNIMP,
934 "%08x: unimplemented access size: %d\n", addr,
935 size);
936 }
937 break;
938 case 0x01c00e00: /* MXCC error register */
939 /* writing a 1 bit clears the error */
940 if (size == 8) {
941 env->mxccregs[6] &= ~val;
942 } else {
943 qemu_log_mask(LOG_UNIMP,
944 "%08x: unimplemented access size: %d\n", addr,
945 size);
946 }
947 break;
948 case 0x01c00f00: /* MBus port address register */
949 if (size == 8) {
950 env->mxccregs[7] = val;
951 } else {
952 qemu_log_mask(LOG_UNIMP,
953 "%08x: unimplemented access size: %d\n", addr,
954 size);
955 }
956 break;
957 default:
958 qemu_log_mask(LOG_UNIMP,
959 "%08x: unimplemented address, size: %d\n", addr,
960 size);
961 break;
962 }
963 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
964 asi, size, addr, val);
965 #ifdef DEBUG_MXCC
966 dump_mxcc(env);
967 #endif
968 break;
969 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
970 case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
971 {
972 int mmulev;
973
974 mmulev = (addr >> 8) & 15;
975 DPRINTF_MMU("mmu flush level %d\n", mmulev);
976 switch (mmulev) {
977 case 0: /* flush page */
978 tlb_flush_page(cs, addr & 0xfffff000);
979 break;
980 case 1: /* flush segment (256k) */
981 case 2: /* flush region (16M) */
982 case 3: /* flush context (4G) */
983 case 4: /* flush entire */
984 tlb_flush(cs);
985 break;
986 default:
987 break;
988 }
989 #ifdef DEBUG_MMU
990 dump_mmu(env);
991 #endif
992 }
993 break;
994 case ASI_M_MMUREGS: /* write MMU regs */
995 case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
996 {
997 int reg = (addr >> 8) & 0x1f;
998 uint32_t oldreg;
999
1000 oldreg = env->mmuregs[reg];
1001 switch (reg) {
1002 case 0: /* Control Register */
1003 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1004 (val & 0x00ffffff);
1005 /* Mappings generated during no-fault mode
1006 are invalid in normal mode. */
1007 if ((oldreg ^ env->mmuregs[reg])
1008 & (MMU_NF | env->def.mmu_bm)) {
1009 tlb_flush(cs);
1010 }
1011 break;
1012 case 1: /* Context Table Pointer Register */
1013 env->mmuregs[reg] = val & env->def.mmu_ctpr_mask;
1014 break;
1015 case 2: /* Context Register */
1016 env->mmuregs[reg] = val & env->def.mmu_cxr_mask;
1017 if (oldreg != env->mmuregs[reg]) {
1018 /* we flush when the MMU context changes because
1019 QEMU has no MMU context support */
1020 tlb_flush(cs);
1021 }
1022 break;
1023 case 3: /* Synchronous Fault Status Register with Clear */
1024 case 4: /* Synchronous Fault Address Register */
1025 break;
1026 case 0x10: /* TLB Replacement Control Register */
1027 env->mmuregs[reg] = val & env->def.mmu_trcr_mask;
1028 break;
1029 case 0x13: /* Synchronous Fault Status Register with Read
1030 and Clear */
1031 env->mmuregs[3] = val & env->def.mmu_sfsr_mask;
1032 break;
1033 case 0x14: /* Synchronous Fault Address Register */
1034 env->mmuregs[4] = val;
1035 break;
1036 default:
1037 env->mmuregs[reg] = val;
1038 break;
1039 }
1040 if (oldreg != env->mmuregs[reg]) {
1041 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1042 reg, oldreg, env->mmuregs[reg]);
1043 }
1044 #ifdef DEBUG_MMU
1045 dump_mmu(env);
1046 #endif
1047 }
1048 break;
1049 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
1050 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
1051 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
1052 break;
1053 case ASI_M_TXTC_TAG: /* I-cache tag */
1054 case ASI_M_TXTC_DATA: /* I-cache data */
1055 case ASI_M_DATAC_TAG: /* D-cache tag */
1056 case ASI_M_DATAC_DATA: /* D-cache data */
1057 case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
1058 case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
1059 case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
1060 case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
1061 case ASI_M_FLUSH_USER: /* I/D-cache flush user */
1062 break;
1063 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1064 {
1065 MemTxResult result;
1066 hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32);
1067
1068 switch (size) {
1069 case 1:
1070 address_space_stb(cs->as, access_addr, val,
1071 MEMTXATTRS_UNSPECIFIED, &result);
1072 break;
1073 case 2:
1074 address_space_stw(cs->as, access_addr, val,
1075 MEMTXATTRS_UNSPECIFIED, &result);
1076 break;
1077 case 4:
1078 default:
1079 address_space_stl(cs->as, access_addr, val,
1080 MEMTXATTRS_UNSPECIFIED, &result);
1081 break;
1082 case 8:
1083 address_space_stq(cs->as, access_addr, val,
1084 MEMTXATTRS_UNSPECIFIED, &result);
1085 break;
1086 }
1087 if (result != MEMTX_OK) {
1088 sparc_raise_mmu_fault(cs, access_addr, true, false, false,
1089 size, GETPC());
1090 }
1091 }
1092 break;
1093 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
1094 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
1095 Turbosparc snoop RAM */
1096 case 0x32: /* store buffer control or Turbosparc page table
1097 descriptor diagnostic */
1098 case 0x36: /* I-cache flash clear */
1099 case 0x37: /* D-cache flash clear */
1100 break;
1101 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1102 {
1103 int reg = (addr >> 8) & 3;
1104
1105 switch (reg) {
1106 case 0: /* Breakpoint Value (Addr) */
1107 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1108 break;
1109 case 1: /* Breakpoint Mask */
1110 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1111 break;
1112 case 2: /* Breakpoint Control */
1113 env->mmubpregs[reg] = (val & 0x7fULL);
1114 break;
1115 case 3: /* Breakpoint Status */
1116 env->mmubpregs[reg] = (val & 0xfULL);
1117 break;
1118 }
1119 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
1120 env->mmuregs[reg]);
1121 }
1122 break;
1123 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1124 env->mmubpctrv = val & 0xffffffff;
1125 break;
1126 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1127 env->mmubpctrc = val & 0x3;
1128 break;
1129 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1130 env->mmubpctrs = val & 0x3;
1131 break;
1132 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1133 env->mmubpaction = val & 0x1fff;
1134 break;
1135 case ASI_USERTXT: /* User code access, XXX */
1136 case ASI_KERNELTXT: /* Supervisor code access, XXX */
1137 default:
1138 sparc_raise_mmu_fault(cs, addr, true, false, asi, size, GETPC());
1139 break;
1140
1141 case ASI_USERDATA: /* User data access */
1142 case ASI_KERNELDATA: /* Supervisor data access */
1143 case ASI_P:
1144 case ASI_M_BYPASS: /* MMU passthrough */
1145 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1146 case ASI_M_BCOPY: /* Block copy, sta access */
1147 case ASI_M_BFILL: /* Block fill, stda access */
1148 /* These are always handled inline. */
1149 g_assert_not_reached();
1150 }
1151 #ifdef DEBUG_ASI
1152 dump_asi("write", addr, asi, size, val);
1153 #endif
1154 }
1155
1156 uint64_t helper_ld_code(CPUSPARCState *env, target_ulong addr, uint32_t oi)
1157 {
1158 MemOp mop = get_memop(oi);
1159 uintptr_t ra = GETPC();
1160 uint64_t ret;
1161
1162 switch (mop & MO_SIZE) {
1163 case MO_8:
1164 ret = cpu_ldb_code_mmu(env, addr, oi, ra);
1165 if (mop & MO_SIGN) {
1166 ret = (int8_t)ret;
1167 }
1168 break;
1169 case MO_16:
1170 ret = cpu_ldw_code_mmu(env, addr, oi, ra);
1171 if ((mop & MO_BSWAP) != MO_TE) {
1172 ret = bswap16(ret);
1173 }
1174 if (mop & MO_SIGN) {
1175 ret = (int16_t)ret;
1176 }
1177 break;
1178 case MO_32:
1179 ret = cpu_ldl_code_mmu(env, addr, oi, ra);
1180 if ((mop & MO_BSWAP) != MO_TE) {
1181 ret = bswap32(ret);
1182 }
1183 if (mop & MO_SIGN) {
1184 ret = (int32_t)ret;
1185 }
1186 break;
1187 case MO_64:
1188 ret = cpu_ldq_code_mmu(env, addr, oi, ra);
1189 if ((mop & MO_BSWAP) != MO_TE) {
1190 ret = bswap64(ret);
1191 }
1192 break;
1193 default:
1194 g_assert_not_reached();
1195 }
1196 return ret;
1197 }
1198
1199 #endif /* CONFIG_USER_ONLY */
1200 #else /* TARGET_SPARC64 */
1201
1202 #ifdef CONFIG_USER_ONLY
1203 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1204 int asi, uint32_t memop)
1205 {
1206 int size = 1 << (memop & MO_SIZE);
1207 int sign = memop & MO_SIGN;
1208 uint64_t ret = 0;
1209
1210 if (asi < 0x80) {
1211 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1212 }
1213 do_check_align(env, addr, size - 1, GETPC());
1214 addr = asi_address_mask(env, asi, addr);
1215
1216 switch (asi) {
1217 case ASI_PNF: /* Primary no-fault */
1218 case ASI_PNFL: /* Primary no-fault LE */
1219 case ASI_SNF: /* Secondary no-fault */
1220 case ASI_SNFL: /* Secondary no-fault LE */
1221 if (!page_check_range(addr, size, PAGE_READ)) {
1222 ret = 0;
1223 break;
1224 }
1225 switch (size) {
1226 case 1:
1227 ret = cpu_ldub_data(env, addr);
1228 break;
1229 case 2:
1230 ret = cpu_lduw_data(env, addr);
1231 break;
1232 case 4:
1233 ret = cpu_ldl_data(env, addr);
1234 break;
1235 case 8:
1236 ret = cpu_ldq_data(env, addr);
1237 break;
1238 default:
1239 g_assert_not_reached();
1240 }
1241 break;
1243
1244 case ASI_P: /* Primary */
1245 case ASI_PL: /* Primary LE */
1246 case ASI_S: /* Secondary */
1247 case ASI_SL: /* Secondary LE */
1248 /* These are always handled inline. */
1249 g_assert_not_reached();
1250
1251 default:
1252 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1253 }
1254
1255 /* Convert from little endian */
1256 switch (asi) {
1257 case ASI_PNFL: /* Primary no-fault LE */
1258 case ASI_SNFL: /* Secondary no-fault LE */
1259 switch (size) {
1260 case 2:
1261 ret = bswap16(ret);
1262 break;
1263 case 4:
1264 ret = bswap32(ret);
1265 break;
1266 case 8:
1267 ret = bswap64(ret);
1268 break;
1269 }
1270 }
1271
1272 /* Convert to signed number */
1273 if (sign) {
1274 switch (size) {
1275 case 1:
1276 ret = (int8_t) ret;
1277 break;
1278 case 2:
1279 ret = (int16_t) ret;
1280 break;
1281 case 4:
1282 ret = (int32_t) ret;
1283 break;
1284 }
1285 }
1286 #ifdef DEBUG_ASI
1287 dump_asi("read", addr, asi, size, ret);
1288 #endif
1289 return ret;
1290 }
1291
1292 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1293 int asi, uint32_t memop)
1294 {
1295 int size = 1 << (memop & MO_SIZE);
1296 #ifdef DEBUG_ASI
1297 dump_asi("write", addr, asi, size, val);
1298 #endif
1299 if (asi < 0x80) {
1300 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1301 }
1302 do_check_align(env, addr, size - 1, GETPC());
1303
1304 switch (asi) {
1305 case ASI_P: /* Primary */
1306 case ASI_PL: /* Primary LE */
1307 case ASI_S: /* Secondary */
1308 case ASI_SL: /* Secondary LE */
1309 /* These are always handled inline. */
1310 g_assert_not_reached();
1311
1312 case ASI_PNF: /* Primary no-fault, RO */
1313 case ASI_SNF: /* Secondary no-fault, RO */
1314 case ASI_PNFL: /* Primary no-fault LE, RO */
1315 case ASI_SNFL: /* Secondary no-fault LE, RO */
1316 default:
1317 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1318 }
1319 }
1320
1321 #else /* CONFIG_USER_ONLY */
1322
1323 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1324 int asi, uint32_t memop)
1325 {
1326 int size = 1 << (memop & MO_SIZE);
1327 int sign = memop & MO_SIGN;
1328 CPUState *cs = env_cpu(env);
1329 uint64_t ret = 0;
1330 #if defined(DEBUG_ASI)
1331 target_ulong last_addr = addr;
1332 #endif
1333
1334 asi &= 0xff;
1335
1336 do_check_asi(env, asi, GETPC());
1337 do_check_align(env, addr, size - 1, GETPC());
1338 addr = asi_address_mask(env, asi, addr);
1339
1340 switch (asi) {
1341 case ASI_PNF:
1342 case ASI_PNFL:
1343 case ASI_SNF:
1344 case ASI_SNFL:
1345 {
1346 MemOpIdx oi;
1347 int idx = (env->pstate & PS_PRIV
1348 ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
1349 : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
1350
1351 if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
1352 #ifdef DEBUG_ASI
1353 dump_asi("read ", last_addr, asi, size, ret);
1354 #endif
1355 /* exception_index is set in get_physical_address_data. */
1356 cpu_raise_exception_ra(env, cs->exception_index, GETPC());
1357 }
1358 oi = make_memop_idx(memop, idx);
1359 switch (size) {
1360 case 1:
1361 ret = cpu_ldb_mmu(env, addr, oi, GETPC());
1362 break;
1363 case 2:
1364 ret = cpu_ldw_mmu(env, addr, oi, GETPC());
1365 break;
1366 case 4:
1367 ret = cpu_ldl_mmu(env, addr, oi, GETPC());
1368 break;
1369 case 8:
1370 ret = cpu_ldq_mmu(env, addr, oi, GETPC());
1371 break;
1372 default:
1373 g_assert_not_reached();
1374 }
1375 }
1376 break;
1377
1378 case ASI_AIUP: /* As if user primary */
1379 case ASI_AIUS: /* As if user secondary */
1380 case ASI_AIUPL: /* As if user primary LE */
1381 case ASI_AIUSL: /* As if user secondary LE */
1382 case ASI_P: /* Primary */
1383 case ASI_S: /* Secondary */
1384 case ASI_PL: /* Primary LE */
1385 case ASI_SL: /* Secondary LE */
1386 case ASI_REAL: /* Bypass */
1387 case ASI_REAL_IO: /* Bypass, non-cacheable */
1388 case ASI_REAL_L: /* Bypass LE */
1389 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1390 case ASI_N: /* Nucleus */
1391 case ASI_NL: /* Nucleus Little Endian (LE) */
1392 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1393 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1394 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1395 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1396 case ASI_TWINX_REAL: /* Real address, twinx */
1397 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1398 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1399 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1400 case ASI_TWINX_N: /* Nucleus, twinx */
1401 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1402 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1403 case ASI_TWINX_P: /* Primary, twinx */
1404 case ASI_TWINX_PL: /* Primary, twinx, LE */
1405 case ASI_TWINX_S: /* Secondary, twinx */
1406 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1407 case ASI_MON_P:
1408 case ASI_MON_S:
1409 case ASI_MON_AIUP:
1410 case ASI_MON_AIUS:
1411 /* These are always handled inline. */
1412 g_assert_not_reached();
1413
1414 case ASI_UPA_CONFIG: /* UPA config */
1415 /* XXX */
1416 break;
1417 case ASI_LSU_CONTROL: /* LSU */
1418 ret = env->lsu;
1419 break;
1420 case ASI_IMMU: /* I-MMU regs */
1421 {
1422 int reg = (addr >> 3) & 0xf;
1423 switch (reg) {
1424 case 0:
1425 /* 0x00 I-TSB Tag Target register */
1426 ret = ultrasparc_tag_target(env->immu.tag_access);
1427 break;
1428 case 3: /* SFSR */
1429 ret = env->immu.sfsr;
1430 break;
1431 case 5: /* TSB access */
1432 ret = env->immu.tsb;
1433 break;
1434 case 6:
1435 /* 0x30 I-TSB Tag Access register */
1436 ret = env->immu.tag_access;
1437 break;
1438 default:
1439 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1440 ret = 0;
1441 }
1442 break;
1443 }
1444 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
1445 {
1446 /* env->immuregs[5] holds I-MMU TSB register value
1447 env->immuregs[6] holds I-MMU Tag Access register value */
1448 ret = ultrasparc_tsb_pointer(env, &env->immu, 0);
1449 break;
1450 }
1451 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
1452 {
1453 /* env->immuregs[5] holds I-MMU TSB register value
1454 env->immuregs[6] holds I-MMU Tag Access register value */
1455 ret = ultrasparc_tsb_pointer(env, &env->immu, 1);
1456 break;
1457 }
1458 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1459 {
1460 int reg = (addr >> 3) & 0x3f;
1461
1462 ret = env->itlb[reg].tte;
1463 break;
1464 }
1465 case ASI_ITLB_TAG_READ: /* I-MMU tag read */
1466 {
1467 int reg = (addr >> 3) & 0x3f;
1468
1469 ret = env->itlb[reg].tag;
1470 break;
1471 }
1472 case ASI_DMMU: /* D-MMU regs */
1473 {
1474 int reg = (addr >> 3) & 0xf;
1475 switch (reg) {
1476 case 0:
1477 /* 0x00 D-TSB Tag Target register */
1478 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1479 break;
1480 case 1: /* 0x08 Primary Context */
1481 ret = env->dmmu.mmu_primary_context;
1482 break;
1483 case 2: /* 0x10 Secondary Context */
1484 ret = env->dmmu.mmu_secondary_context;
1485 break;
1486 case 3: /* SFSR */
1487 ret = env->dmmu.sfsr;
1488 break;
1489 case 4: /* 0x20 SFAR */
1490 ret = env->dmmu.sfar;
1491 break;
1492 case 5: /* 0x28 TSB access */
1493 ret = env->dmmu.tsb;
1494 break;
1495 case 6: /* 0x30 D-TSB Tag Access register */
1496 ret = env->dmmu.tag_access;
1497 break;
1498 case 7:
1499 ret = env->dmmu.virtual_watchpoint;
1500 break;
1501 case 8:
1502 ret = env->dmmu.physical_watchpoint;
1503 break;
1504 default:
1505 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1506 ret = 0;
1507 }
1508 break;
1509 }
1510 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
1511 {
1512 /* env->dmmuregs[5] holds D-MMU TSB register value
1513 env->dmmuregs[6] holds D-MMU Tag Access register value */
1514 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0);
1515 break;
1516 }
1517 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
1518 {
1519 /* env->dmmuregs[5] holds D-MMU TSB register value
1520 env->dmmuregs[6] holds D-MMU Tag Access register value */
1521 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1);
1522 break;
1523 }
1524 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1525 {
1526 int reg = (addr >> 3) & 0x3f;
1527
1528 ret = env->dtlb[reg].tte;
1529 break;
1530 }
1531 case ASI_DTLB_TAG_READ: /* D-MMU tag read */
1532 {
1533 int reg = (addr >> 3) & 0x3f;
1534
1535 ret = env->dtlb[reg].tag;
1536 break;
1537 }
1538 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1539 break;
1540 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1541 ret = env->ivec_status;
1542 break;
1543 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1544 {
1545 int reg = (addr >> 4) & 0x3;
1546 if (reg < 3) {
1547 ret = env->ivec_data[reg];
1548 }
1549 break;
1550 }
1551 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1552 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1553 /* Hyperprivileged access only */
1554 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1555 }
1556 /* fall through */
1557 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1558 {
1559 unsigned int i = (addr >> 3) & 0x7;
1560 ret = env->scratch[i];
1561 break;
1562 }
1563 case ASI_MMU: /* UA2005 Context ID registers */
1564 switch ((addr >> 3) & 0x3) {
1565 case 1:
1566 ret = env->dmmu.mmu_primary_context;
1567 break;
1568 case 2:
1569 ret = env->dmmu.mmu_secondary_context;
1570 break;
1571 default:
1572 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1573 }
1574 break;
1575 case ASI_DCACHE_DATA: /* D-cache data */
1576 case ASI_DCACHE_TAG: /* D-cache tag access */
1577 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1578 case ASI_AFSR: /* E-cache asynchronous fault status */
1579 case ASI_AFAR: /* E-cache asynchronous fault address */
1580 case ASI_EC_TAG_DATA: /* E-cache tag data */
1581 case ASI_IC_INSTR: /* I-cache instruction access */
1582 case ASI_IC_TAG: /* I-cache tag access */
1583 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1584 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1585 case ASI_EC_W: /* E-cache tag */
1586 case ASI_EC_R: /* E-cache tag */
1587 break;
1588 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
1589 case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
1590 case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
1591 case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
1592 case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
1593 case ASI_INTR_W: /* Interrupt vector, WO */
1594 default:
1595 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1596 ret = 0;
1597 break;
1598 }
1599
1600 /* Convert to signed number */
1601 if (sign) {
1602 switch (size) {
1603 case 1:
1604 ret = (int8_t) ret;
1605 break;
1606 case 2:
1607 ret = (int16_t) ret;
1608 break;
1609 case 4:
1610 ret = (int32_t) ret;
1611 break;
1612 default:
1613 break;
1614 }
1615 }
1616 #ifdef DEBUG_ASI
1617 dump_asi("read ", last_addr, asi, size, ret);
1618 #endif
1619 return ret;
1620 }
1621
1622 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1623 int asi, uint32_t memop)
1624 {
1625 int size = 1 << (memop & MO_SIZE);
1626 CPUState *cs = env_cpu(env);
1627
1628 #ifdef DEBUG_ASI
1629 dump_asi("write", addr, asi, size, val);
1630 #endif
1631
1632 asi &= 0xff;
1633
1634 do_check_asi(env, asi, GETPC());
1635 do_check_align(env, addr, size - 1, GETPC());
1636 addr = asi_address_mask(env, asi, addr);
1637
1638 switch (asi) {
1639 case ASI_AIUP: /* As if user primary */
1640 case ASI_AIUS: /* As if user secondary */
1641 case ASI_AIUPL: /* As if user primary LE */
1642 case ASI_AIUSL: /* As if user secondary LE */
1643 case ASI_P: /* Primary */
1644 case ASI_S: /* Secondary */
1645 case ASI_PL: /* Primary LE */
1646 case ASI_SL: /* Secondary LE */
1647 case ASI_REAL: /* Bypass */
1648 case ASI_REAL_IO: /* Bypass, non-cacheable */
1649 case ASI_REAL_L: /* Bypass LE */
1650 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1651 case ASI_N: /* Nucleus */
1652 case ASI_NL: /* Nucleus Little Endian (LE) */
1653 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1654 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1655 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1656 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1657 case ASI_TWINX_REAL: /* Real address, twinx */
1658 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1659 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1660 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1661 case ASI_TWINX_N: /* Nucleus, twinx */
1662 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1663 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1664 case ASI_TWINX_P: /* Primary, twinx */
1665 case ASI_TWINX_PL: /* Primary, twinx, LE */
1666 case ASI_TWINX_S: /* Secondary, twinx */
1667 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1668 /* These are always handled inline. */
1669 g_assert_not_reached();
1670 /* these ASIs have different functions on UltraSPARC-IIIi
1671 * and UA2005 CPUs. Use the explicit numbers to avoid confusion
1672 */
1673 case 0x31:
1674 case 0x32:
1675 case 0x39:
1676 case 0x3a:
1677 if (cpu_has_hypervisor(env)) {
1678 /* UA2005
1679 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0
1680 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1
1681 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0
1682 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1
1683 */
1684 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
1685 env->dmmu.sun4v_tsb_pointers[idx] = val;
1686 } else {
1687 goto illegal_insn;
1688 }
1689 break;
1690 case 0x33:
1691 case 0x3b:
1692 if (cpu_has_hypervisor(env)) {
1693 /* UA2005
1694 * ASI_DMMU_CTX_ZERO_CONFIG
1695 * ASI_DMMU_CTX_NONZERO_CONFIG
1696 */
1697 env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1698 } else {
1699 goto illegal_insn;
1700 }
1701 break;
1702 case 0x35:
1703 case 0x36:
1704 case 0x3d:
1705 case 0x3e:
1706 if (cpu_has_hypervisor(env)) {
1707 /* UA2005
1708 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0
1709 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1
1710 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0
1711 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1
1712 */
1713 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
1714 env->immu.sun4v_tsb_pointers[idx] = val;
1715 } else {
1716 goto illegal_insn;
1717 }
1718 break;
1719 case 0x37:
1720 case 0x3f:
1721 if (cpu_has_hypervisor(env)) {
1722 /* UA2005
1723 * ASI_IMMU_CTX_ZERO_CONFIG
1724 * ASI_IMMU_CTX_NONZERO_CONFIG
1725 */
1726 env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1727 } else {
1728 goto illegal_insn;
1729 }
1730 break;
1731 case ASI_UPA_CONFIG: /* UPA config */
1732 /* XXX */
1733 return;
1734 case ASI_LSU_CONTROL: /* LSU */
1735 env->lsu = val & (DMMU_E | IMMU_E);
1736 return;
1737 case ASI_IMMU: /* I-MMU regs */
1738 {
1739 int reg = (addr >> 3) & 0xf;
1740 uint64_t oldreg;
1741
1742 oldreg = env->immu.mmuregs[reg];
1743 switch (reg) {
1744 case 0: /* RO */
1745 return;
1746 case 1: /* Not in I-MMU */
1747 case 2:
1748 return;
1749 case 3: /* SFSR */
1750 if ((val & 1) == 0) {
1751 val = 0; /* Clear SFSR */
1752 }
1753 env->immu.sfsr = val;
1754 break;
1755 case 4: /* RO */
1756 return;
1757 case 5: /* TSB access */
1758 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1759 PRIx64 "\n", env->immu.tsb, val);
1760 env->immu.tsb = val;
1761 break;
1762 case 6: /* Tag access */
1763 env->immu.tag_access = val;
1764 break;
1765 case 7:
1766 case 8:
1767 return;
1768 default:
1769 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1770 break;
1771 }
1772
1773 if (oldreg != env->immu.mmuregs[reg]) {
1774 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1775 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1776 }
1777 #ifdef DEBUG_MMU
1778 dump_mmu(env);
1779 #endif
1780 return;
1781 }
1782 case ASI_ITLB_DATA_IN: /* I-MMU data in */
1783 /* ignore real translation entries */
1784 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1785 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access,
1786 val, "immu", env, addr);
1787 }
1788 return;
1789 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1790 {
1791 /* TODO: auto demap */
1792
1793 unsigned int i = (addr >> 3) & 0x3f;
1794
1795 /* ignore real translation entries */
1796 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1797 replace_tlb_entry(&env->itlb[i], env->immu.tag_access,
1798 sun4v_tte_to_sun4u(env, addr, val), env);
1799 }
1800 #ifdef DEBUG_MMU
1801 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1802 dump_mmu(env);
1803 #endif
1804 return;
1805 }
1806 case ASI_IMMU_DEMAP: /* I-MMU demap */
1807 demap_tlb(env->itlb, addr, "immu", env);
1808 return;
1809 case ASI_DMMU: /* D-MMU regs */
1810 {
1811 int reg = (addr >> 3) & 0xf;
1812 uint64_t oldreg;
1813
1814 oldreg = env->dmmu.mmuregs[reg];
1815 switch (reg) {
1816 case 0: /* RO */
1817 case 4:
1818 return;
1819 case 3: /* SFSR */
1820 if ((val & 1) == 0) {
1821 val = 0; /* Clear SFSR, Fault address */
1822 env->dmmu.sfar = 0;
1823 }
1824 env->dmmu.sfsr = val;
1825 break;
1826 case 1: /* Primary context */
1827 env->dmmu.mmu_primary_context = val;
1828 /* can be optimized to only flush MMU_USER_IDX
1829 and MMU_KERNEL_IDX entries */
1830 tlb_flush(cs);
1831 break;
1832 case 2: /* Secondary context */
1833 env->dmmu.mmu_secondary_context = val;
1834 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1835 and MMU_KERNEL_SECONDARY_IDX entries */
1836 tlb_flush(cs);
1837 break;
1838 case 5: /* TSB access */
1839 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1840 PRIx64 "\n", env->dmmu.tsb, val);
1841 env->dmmu.tsb = val;
1842 break;
1843 case 6: /* Tag access */
1844 env->dmmu.tag_access = val;
1845 break;
1846 case 7: /* Virtual Watchpoint */
1847 env->dmmu.virtual_watchpoint = val;
1848 break;
1849 case 8: /* Physical Watchpoint */
1850 env->dmmu.physical_watchpoint = val;
1851 break;
1852 default:
1853 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1854 break;
1855 }
1856
1857 if (oldreg != env->dmmu.mmuregs[reg]) {
1858 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1859 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1860 }
1861 #ifdef DEBUG_MMU
1862 dump_mmu(env);
1863 #endif
1864 return;
1865 }
1866 case ASI_DTLB_DATA_IN: /* D-MMU data in */
1867 /* ignore real translation entries */
1868 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1869 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access,
1870 val, "dmmu", env, addr);
1871 }
1872 return;
1873 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1874 {
1875 unsigned int i = (addr >> 3) & 0x3f;
1876
1877 /* ignore real translation entries */
1878 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1879 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access,
1880 sun4v_tte_to_sun4u(env, addr, val), env);
1881 }
1882 #ifdef DEBUG_MMU
1883 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
1884 dump_mmu(env);
1885 #endif
1886 return;
1887 }
1888 case ASI_DMMU_DEMAP: /* D-MMU demap */
1889 demap_tlb(env->dtlb, addr, "dmmu", env);
1890 return;
1891 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1892 env->ivec_status = val & 0x20;
1893 return;
1894 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1895 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1896 /* Hyperprivileged access only */
1897 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1898 }
1899 /* fall through */
1900 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1901 {
1902 unsigned int i = (addr >> 3) & 0x7;
1903 env->scratch[i] = val;
1904 return;
1905 }
1906 case ASI_MMU: /* UA2005 Context ID registers */
1907 {
1908 switch ((addr >> 3) & 0x3) {
1909 case 1:
1910 env->dmmu.mmu_primary_context = val;
1911 env->immu.mmu_primary_context = val;
1912 tlb_flush_by_mmuidx(cs,
1913 (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
1914 break;
1915 case 2:
1916 env->dmmu.mmu_secondary_context = val;
1917 env->immu.mmu_secondary_context = val;
1918 tlb_flush_by_mmuidx(cs,
1919 (1 << MMU_USER_SECONDARY_IDX) |
1920 (1 << MMU_KERNEL_SECONDARY_IDX));
1921 break;
1922 default:
1923 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1924 }
1925 }
1926 return;
1927 case ASI_QUEUE: /* UA2005 CPU mondo queue */
1928 case ASI_DCACHE_DATA: /* D-cache data */
1929 case ASI_DCACHE_TAG: /* D-cache tag access */
1930 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1931 case ASI_AFSR: /* E-cache asynchronous fault status */
1932 case ASI_AFAR: /* E-cache asynchronous fault address */
1933 case ASI_EC_TAG_DATA: /* E-cache tag data */
1934 case ASI_IC_INSTR: /* I-cache instruction access */
1935 case ASI_IC_TAG: /* I-cache tag access */
1936 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1937 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1938 case ASI_EC_W: /* E-cache tag */
1939 case ASI_EC_R: /* E-cache tag */
1940 return;
1941 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
1942 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
1943 case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
1944 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
1945 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
1946 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
1947 case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
1948 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1949 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1950 case ASI_PNF: /* Primary no-fault, RO */
1951 case ASI_SNF: /* Secondary no-fault, RO */
1952 case ASI_PNFL: /* Primary no-fault LE, RO */
1953 case ASI_SNFL: /* Secondary no-fault LE, RO */
1954 default:
1955 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1956 return;
1957 illegal_insn:
1958 cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
1959 }
1960 }
1961 #endif /* CONFIG_USER_ONLY */
1962 #endif /* TARGET_SPARC64 */
1963
1964 #if !defined(CONFIG_USER_ONLY)
1965
1966 void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1967 vaddr addr, unsigned size,
1968 MMUAccessType access_type,
1969 int mmu_idx, MemTxAttrs attrs,
1970 MemTxResult response, uintptr_t retaddr)
1971 {
1972 bool is_write = access_type == MMU_DATA_STORE;
1973 bool is_exec = access_type == MMU_INST_FETCH;
1974 bool is_asi = false;
1975
1976 sparc_raise_mmu_fault(cs, physaddr, is_write, is_exec,
1977 is_asi, size, retaddr);
1978 }
1979 #endif
1980