xref: /openbmc/qemu/target/sh4/helper.c (revision 8f0a3716)
/*
 *  SH4 emulation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/sh4/sh_intc.h"
#endif

#if defined(CONFIG_USER_ONLY)

void superh_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                                int mmu_idx)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;

    env->tea = address;
    cs->exception_index = -1;
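    /*
     * rw encodes the access type here: 0 is a data read, 1 a data write
     * and 2 an instruction fetch.  In user-only mode every fault is
     * reported as an SH-4 TLB protection violation: 0x0a0 for reads and
     * fetches, 0x0c0 for writes.
     */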
    switch (rw) {
    case 0:
        cs->exception_index = 0x0a0;
        break;
    case 1:
        cs->exception_index = 0x0c0;
        break;
    case 2:
        cs->exception_index = 0x0a0;
        break;
    }
    return 1;
}

int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
{
    /* For user mode, only U0 area is cacheable. */
    return !(addr & 0x80000000);
}

#else /* !CONFIG_USER_ONLY */

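/*
 * Status codes returned by the MMU lookup helpers below.  They are
 * negative so that a non-negative return value can also serve as a TLB
 * entry index; superh_cpu_handle_mmu_fault() maps them onto the
 * corresponding SH-4 exception vectors.
 */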
#define MMU_OK                   0
#define MMU_ITLB_MISS            (-1)
#define MMU_ITLB_MULTIPLE        (-2)
#define MMU_ITLB_VIOLATION       (-3)
#define MMU_DTLB_MISS_READ       (-4)
#define MMU_DTLB_MISS_WRITE      (-5)
#define MMU_DTLB_INITIAL_WRITE   (-6)
#define MMU_DTLB_VIOLATION_READ  (-7)
#define MMU_DTLB_VIOLATION_WRITE (-8)
#define MMU_DTLB_MULTIPLE        (-9)
#define MMU_DTLB_MISS            (-10)
#define MMU_IADDR_ERROR          (-11)
#define MMU_DADDR_ERROR_READ     (-12)
#define MMU_DADDR_ERROR_WRITE    (-13)

void superh_cpu_do_interrupt(CPUState *cs)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
    int do_exp, irq_vector = cs->exception_index;

    /* prioritize exceptions over interrupts */

    do_exp = cs->exception_index != -1;
    do_irq = do_irq && (cs->exception_index == -1);

    if (env->sr & (1u << SR_BL)) {
        if (do_exp && cs->exception_index != 0x1e0) {
            /* In theory a masked exception generates a reset exception,
               which in turn jumps to the reset vector. However this only
               works when using a bootloader. When using a kernel and an
               initrd, they need to be reloaded and the program counter
               should be loaded with the kernel entry point.
               qemu_system_reset_request takes care of that.  */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }
        if (do_irq && !env->in_sleep) {
            return; /* masked */
        }
    }
    env->in_sleep = 0;

    if (do_irq) {
        irq_vector = sh_intc_get_pending_vector(env->intc_handle,
                                                (env->sr >> 4) & 0xf);
        if (irq_vector == -1) {
            return; /* masked */
        }
    }

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *expname;
        switch (cs->exception_index) {
        case 0x0e0:
            expname = "addr_error";
            break;
        case 0x040:
            expname = "tlb_miss";
            break;
        case 0x0a0:
            expname = "tlb_violation";
            break;
        case 0x180:
            expname = "illegal_instruction";
            break;
        case 0x1a0:
            expname = "slot_illegal_instruction";
            break;
        case 0x800:
            expname = "fpu_disable";
            break;
        case 0x820:
            expname = "slot_fpu";
            break;
        case 0x100:
            expname = "data_write";
            break;
        case 0x060:
            expname = "dtlb_miss_write";
            break;
        case 0x0c0:
            expname = "dtlb_violation_write";
            break;
        case 0x120:
            expname = "fpu_exception";
            break;
        case 0x080:
            expname = "initial_page_write";
            break;
        case 0x160:
            expname = "trapa";
            break;
        default:
            expname = do_irq ? "interrupt" : "???";
            break;
        }
        qemu_log("exception 0x%03x [%s] raised\n",
                 irq_vector, expname);
        log_cpu_state(cs, 0);
    }

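    /*
     * Exception entry, as on the real SH-4: save SR, PC and R15 into
     * SSR, SPC and SGR, then set the BL, MD and RB bits so the handler
     * runs privileged, with exceptions blocked, on register bank 1.
     */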
    env->ssr = cpu_read_sr(env);
    env->spc = env->pc;
    env->sgr = env->gregs[15];
    env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
    env->lock_addr = -1;

    if (env->flags & DELAY_SLOT_MASK) {
        /* Branch instruction should be executed again before delay slot. */
        env->spc -= 2;
        /* Clear flags for exception/interrupt routine. */
        env->flags &= ~DELAY_SLOT_MASK;
    }

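    /*
     * Vector dispatch (SH-4 layout): reset-class events jump to the
     * fixed reset vector in P2, TLB miss exceptions go to VBR + 0x400,
     * all other exceptions to VBR + 0x100, and external interrupts to
     * VBR + 0x600.
     */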
    if (do_exp) {
        env->expevt = cs->exception_index;
        switch (cs->exception_index) {
        case 0x000:
        case 0x020:
        case 0x140:
            env->sr &= ~(1u << SR_FD);
            env->sr |= 0xf << 4; /* IMASK */
            env->pc = 0xa0000000;
            break;
        case 0x040:
        case 0x060:
            env->pc = env->vbr + 0x400;
            break;
        case 0x160:
            env->spc += 2; /* special case for TRAPA */
            /* fall through */
        default:
            env->pc = env->vbr + 0x100;
            break;
        }
        return;
    }

    if (do_irq) {
        env->intevt = irq_vector;
        env->pc = env->vbr + 0x600;
        return;
    }
}

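/*
 * update_itlb_use() and itlb_replacement() maintain the pseudo-LRU state
 * of the four ITLB entries that the hardware keeps in the LRUI field in
 * the top bits of MMUCR: each lookup marks the entry just used as most
 * recently used, and replacement picks an entry whose bits mark it as
 * least recently used.
 */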
static void update_itlb_use(CPUSH4State * env, int itlbnb)
{
    uint8_t or_mask = 0, and_mask = (uint8_t)-1;

    switch (itlbnb) {
    case 0:
        and_mask = 0x1f;
        break;
    case 1:
        and_mask = 0xe7;
        or_mask = 0x80;
        break;
    case 2:
        and_mask = 0xfb;
        or_mask = 0x50;
        break;
    case 3:
        or_mask = 0x2c;
        break;
    }

    env->mmucr &= (and_mask << 24) | 0x00ffffff;
    env->mmucr |= (or_mask << 24);
}

static int itlb_replacement(CPUSH4State * env)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);

    if ((env->mmucr & 0xe0000000) == 0xe0000000) {
        return 0;
    }
    if ((env->mmucr & 0x98000000) == 0x18000000) {
        return 1;
    }
    if ((env->mmucr & 0x54000000) == 0x04000000) {
        return 2;
    }
    if ((env->mmucr & 0x2c000000) == 0x00000000) {
        return 3;
    }
    cpu_abort(CPU(cpu), "Unhandled itlb_replacement");
}

/* Find the corresponding entry in the right TLB
   Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
*/
static int find_tlb_entry(CPUSH4State * env, target_ulong address,
                          tlb_t * entries, uint8_t nbtlb, int use_asid)
{
    int match = MMU_DTLB_MISS;
    uint32_t start, end;
    uint8_t asid;
    int i;

    asid = env->pteh & 0xff;

    for (i = 0; i < nbtlb; i++) {
        if (!entries[i].v)
            continue;           /* Invalid entry */
        if (!entries[i].sh && use_asid && entries[i].asid != asid)
            continue;           /* Bad ASID */
        start = (entries[i].vpn << 10) & ~(entries[i].size - 1);
        end = start + entries[i].size - 1;
        if (address >= start && address <= end) {   /* Match */
            if (match != MMU_DTLB_MISS)
                return MMU_DTLB_MULTIPLE;           /* Multiple match */
            match = i;
        }
    }
    return match;
}

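/*
 * URC, a bit field in MMUCR, is the UTLB index that the next LDTLB will
 * load (see cpu_load_tlb() below).  The hardware increments it on UTLB
 * accesses, wrapping at URB when URB is non-zero and at UTLB_SIZE - 1
 * otherwise, which is what this helper emulates.
 */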
static void increment_urc(CPUSH4State * env)
{
    uint8_t urb, urc;

    /* Increment URC */
    urb = ((env->mmucr) >> 18) & 0x3f;
    urc = ((env->mmucr) >> 10) & 0x3f;
    urc++;
    if ((urb > 0 && urc > urb) || urc > (UTLB_SIZE - 1))
        urc = 0;
    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
}

/* Copy a UTLB entry into the ITLB
   Return the ITLB entry used
*/
static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb)
{
    int itlb;

    tlb_t * ientry;
    itlb = itlb_replacement(env);
    ientry = &env->itlb[itlb];
    if (ientry->v) {
        tlb_flush_page(CPU(sh_env_get_cpu(env)), ientry->vpn << 10);
    }
    *ientry = env->utlb[utlb];
    update_itlb_use(env, itlb);
    return itlb;
}

/* Find itlb entry
   Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
*/
static int find_itlb_entry(CPUSH4State * env, target_ulong address,
                           int use_asid)
{
    int e;

    e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid);
    if (e == MMU_DTLB_MULTIPLE) {
        e = MMU_ITLB_MULTIPLE;
    } else if (e == MMU_DTLB_MISS) {
        e = MMU_ITLB_MISS;
    } else if (e >= 0) {
        update_itlb_use(env, e);
    }
    return e;
}

/* Find utlb entry
   Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid)
{
    /* per utlb access */
    increment_urc(env);

    /* Return entry */
    return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
}

/* Match address against MMU
   Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
   MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
   MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
   MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
   MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
                           int *prot, target_ulong address,
                           int rw, int access_type)
{
    int use_asid, n;
    tlb_t *matching = NULL;

    use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));

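    /*
     * rw == 2 is an instruction fetch: look in the ITLB first and, on a
     * miss, refill it from the UTLB via copy_utlb_entry_itlb() the way
     * the hardware does.  Data accesses (rw == 0 or 1) go through the
     * UTLB only.  ASIDs are ignored for shared pages, and in privileged
     * mode when MMUCR.SV is set.
     */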
    if (rw == 2) {
        n = find_itlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->itlb[n];
            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                n = MMU_ITLB_VIOLATION;
            } else {
                *prot = PAGE_EXEC;
            }
        } else {
            n = find_utlb_entry(env, address, use_asid);
            if (n >= 0) {
                n = copy_utlb_entry_itlb(env, n);
                matching = &env->itlb[n];
                if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                    n = MMU_ITLB_VIOLATION;
                } else {
                    *prot = PAGE_READ | PAGE_EXEC;
                    if ((matching->pr & 1) && matching->d) {
                        *prot |= PAGE_WRITE;
                    }
                }
            } else if (n == MMU_DTLB_MULTIPLE) {
                n = MMU_ITLB_MULTIPLE;
            } else if (n == MMU_DTLB_MISS) {
                n = MMU_ITLB_MISS;
            }
        }
    } else {
        n = find_utlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->utlb[n];
            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
                    MMU_DTLB_VIOLATION_READ;
            } else if ((rw == 1) && !(matching->pr & 1)) {
                n = MMU_DTLB_VIOLATION_WRITE;
            } else if ((rw == 1) && !matching->d) {
                n = MMU_DTLB_INITIAL_WRITE;
            } else {
                *prot = PAGE_READ;
                if ((matching->pr & 1) && matching->d) {
                    *prot |= PAGE_WRITE;
                }
            }
        } else if (n == MMU_DTLB_MISS) {
            n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
                MMU_DTLB_MISS_READ;
        }
    }
    if (n >= 0) {
        n = MMU_OK;
        *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
            (address & (matching->size - 1));
    }
    return n;
}

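/*
 * Translate a virtual address into a physical address and protection
 * bits, following the SH-4 address map: U0/P0 and P3 are translated by
 * the MMU when it is enabled, P1 (0x80000000-0x9fffffff) and P2
 * (0xa0000000-0xbfffffff) are untranslated windows onto physical memory,
 * and P4 (0xe0000000 upwards) is the untranslated control area.  In user
 * mode only U0 and the store-queue window at 0xe0000000-0xe3ffffff are
 * accessible.
 */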
static int get_physical_address(CPUSH4State * env, target_ulong * physical,
                                int *prot, target_ulong address,
                                int rw, int access_type)
{
    /* P1, P2 and P4 areas do not use translation */
    if ((address >= 0x80000000 && address < 0xc0000000) ||
        address >= 0xe0000000) {
        if (!(env->sr & (1u << SR_MD))
            && (address < 0xe0000000 || address >= 0xe4000000)) {
            /* Unauthorized access in user mode (only store queues are available) */
            qemu_log_mask(LOG_GUEST_ERROR, "Unauthorized access\n");
            if (rw == 0)
                return MMU_DADDR_ERROR_READ;
            else if (rw == 1)
                return MMU_DADDR_ERROR_WRITE;
            else
                return MMU_IADDR_ERROR;
        }
        if (address >= 0x80000000 && address < 0xc0000000) {
            /* Mask upper 3 bits for P1 and P2 areas */
            *physical = address & 0x1fffffff;
        } else {
            *physical = address;
        }
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return MMU_OK;
    }

    /* If MMU is disabled, return the corresponding physical page */
    if (!(env->mmucr & MMUCR_AT)) {
        *physical = address & 0x1FFFFFFF;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return MMU_OK;
    }

    /* We need to resort to the MMU */
    return get_mmu_address(env, physical, prot, address, rw, access_type);
}

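/*
 * Called on a QEMU TLB miss: translate the access and either install the
 * mapping or latch the fault state (TEA, PTEH, exception_index) that the
 * guest's exception handler expects.  PTEH is deliberately left untouched
 * for multiple-hit faults, which are handled through the reset vector.
 */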
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                                int mmu_idx)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    target_ulong physical;
    int prot, ret, access_type;

    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw,
                               access_type);

    if (ret != MMU_OK) {
        env->tea = address;
        if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
            env->pteh = (env->pteh & PTEH_ASID_MASK) |
                        (address & PTEH_VPN_MASK);
        }
        switch (ret) {
        case MMU_ITLB_MISS:
        case MMU_DTLB_MISS_READ:
            cs->exception_index = 0x040;
            break;
        case MMU_DTLB_MULTIPLE:
        case MMU_ITLB_MULTIPLE:
            cs->exception_index = 0x140;
            break;
        case MMU_ITLB_VIOLATION:
            cs->exception_index = 0x0a0;
            break;
        case MMU_DTLB_MISS_WRITE:
            cs->exception_index = 0x060;
            break;
        case MMU_DTLB_INITIAL_WRITE:
            cs->exception_index = 0x080;
            break;
        case MMU_DTLB_VIOLATION_READ:
            cs->exception_index = 0x0a0;
            break;
        case MMU_DTLB_VIOLATION_WRITE:
            cs->exception_index = 0x0c0;
            break;
        case MMU_IADDR_ERROR:
        case MMU_DADDR_ERROR_READ:
            cs->exception_index = 0x0e0;
            break;
        case MMU_DADDR_ERROR_WRITE:
            cs->exception_index = 0x100;
            break;
        default:
            cpu_abort(cs, "Unhandled MMU fault");
        }
        return 1;
    }

    address &= TARGET_PAGE_MASK;
    physical &= TARGET_PAGE_MASK;

    tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}

hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    target_ulong physical;
    int prot;

    get_physical_address(&cpu->env, &physical, &prot, addr, 0, 0);
    return physical;
}

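/*
 * LDTLB: load PTEH/PTEL/PTEA into the UTLB entry selected by MMUCR.URC,
 * flushing any mapping that entry previously provided.
 */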
void cpu_load_tlb(CPUSH4State * env)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    int n = cpu_mmucr_urc(env->mmucr);
    tlb_t * entry = &env->utlb[n];

    if (entry->v) {
        /* Overwriting valid entry in utlb. */
        target_ulong address = entry->vpn << 10;
        tlb_flush_page(CPU(cpu), address);
    }

    /* Copy the PTEH/PTEL/PTEA register fields into the selected UTLB entry. */
    entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
    entry->vpn  = cpu_pteh_vpn(env->pteh);
    entry->v    = (uint8_t)cpu_ptel_v(env->ptel);
    entry->ppn  = cpu_ptel_ppn(env->ptel);
    entry->sz   = (uint8_t)cpu_ptel_sz(env->ptel);
    switch (entry->sz) {
    case 0: /* 00 */
        entry->size = 1024; /* 1K */
        break;
    case 1: /* 01 */
        entry->size = 1024 * 4; /* 4K */
        break;
    case 2: /* 10 */
        entry->size = 1024 * 64; /* 64K */
        break;
    case 3: /* 11 */
        entry->size = 1024 * 1024; /* 1M */
        break;
    default:
        cpu_abort(CPU(cpu), "Unhandled load_tlb");
        break;
    }
    entry->sh   = (uint8_t)cpu_ptel_sh(env->ptel);
    entry->c    = (uint8_t)cpu_ptel_c(env->ptel);
    entry->pr   = (uint8_t)cpu_ptel_pr(env->ptel);
    entry->d    = (uint8_t)cpu_ptel_d(env->ptel);
    entry->wt   = (uint8_t)cpu_ptel_wt(env->ptel);
    entry->sa   = (uint8_t)cpu_ptea_sa(env->ptea);
    entry->tc   = (uint8_t)cpu_ptea_tc(env->ptea);
}

void cpu_sh4_invalidate_tlb(CPUSH4State *s)
{
    int i;

    /* UTLB */
    for (i = 0; i < UTLB_SIZE; i++) {
        tlb_t * entry = &s->utlb[i];
        entry->v = 0;
    }
    /* ITLB */
    for (i = 0; i < ITLB_SIZE; i++) {
        tlb_t * entry = &s->itlb[i];
        entry->v = 0;
    }

    tlb_flush(CPU(sh_env_get_cpu(s)));
}

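/*
 * The helpers below implement the memory-mapped ITLB/UTLB address and
 * data arrays that the SH-4 exposes in the P4 area.  The entry index is
 * taken from the access address (bits 9:8 for the 4-entry ITLB, bits
 * 13:8 for the 64-entry UTLB) and bit 23 selects data array 1 or 2.
 */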
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
                                       hwaddr addr)
{
    int index = (addr & 0x00000300) >> 8;
    tlb_t * entry = &s->itlb[index];

    return (entry->vpn  << 10) |
           (entry->v    <<  8) |
           (entry->asid);
}

void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);

    int index = (addr & 0x00000300) >> 8;
    tlb_t * entry = &s->itlb[index];
    if (entry->v) {
        /* Overwriting valid entry in itlb. */
        target_ulong address = entry->vpn << 10;
        tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
    }
    entry->asid = asid;
    entry->vpn = vpn;
    entry->v = v;
}

uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s,
                                       hwaddr addr)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00000300) >> 8;
    tlb_t * entry = &s->itlb[index];

    if (array == 0) {
        /* ITLB Data Array 1 */
        return (entry->ppn << 10) |
               (entry->v   <<  8) |
               (entry->pr  <<  5) |
               ((entry->sz & 1) <<  6) |
               ((entry->sz & 2) <<  4) |
               (entry->c   <<  3) |
               (entry->sh  <<  1);
    } else {
        /* ITLB Data Array 2 */
        return (entry->tc << 1) |
               (entry->sa);
    }
}

void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00000300) >> 8;
    tlb_t * entry = &s->itlb[index];

    if (array == 0) {
        /* ITLB Data Array 1 */
        if (entry->v) {
            /* Overwriting valid entry in itlb. */
            target_ulong address = entry->vpn << 10;
            tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
        }
        entry->ppn = (mem_value & 0x1ffffc00) >> 10;
        entry->v   = (mem_value & 0x00000100) >> 8;
        entry->sz  = (mem_value & 0x00000080) >> 6 |
                     (mem_value & 0x00000010) >> 4;
        entry->pr  = (mem_value & 0x00000040) >> 5;
        entry->c   = (mem_value & 0x00000008) >> 3;
        entry->sh  = (mem_value & 0x00000002) >> 1;
    } else {
        /* ITLB Data Array 2 */
        entry->tc  = (mem_value & 0x00000008) >> 3;
        entry->sa  = (mem_value & 0x00000007);
    }
}

uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s,
                                       hwaddr addr)
{
    int index = (addr & 0x00003f00) >> 8;
    tlb_t * entry = &s->utlb[index];

    increment_urc(s); /* per utlb access */

    return (entry->vpn  << 10) |
           (entry->v    <<  8) |
           (entry->asid);
}

void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    int associate = addr & 0x00000080;
    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
    uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
    int use_asid = !(s->mmucr & MMUCR_SV) || !(s->sr & (1u << SR_MD));

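    /*
     * An associative write (address bit 7 set) searches the UTLB, and
     * then the ITLB, for an entry whose VPN (and ASID, when relevant)
     * matches, and updates its V and D bits; more than one UTLB match
     * raises the multiple-hit exception.  A non-associative write simply
     * replaces the indexed entry.
     */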
    if (associate) {
        int i;
        tlb_t * utlb_match_entry = NULL;
        int needs_tlb_flush = 0;

        /* search UTLB */
        for (i = 0; i < UTLB_SIZE; i++) {
            tlb_t * entry = &s->utlb[i];
            if (!entry->v)
                continue;

            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (utlb_match_entry) {
                    CPUState *cs = CPU(sh_env_get_cpu(s));

                    /* Multiple TLB Exception */
                    cs->exception_index = 0x140;
                    s->tea = addr;
                    break;
                }
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                entry->v = v;
                entry->d = d;
                utlb_match_entry = entry;
            }
            increment_urc(s); /* per utlb access */
        }

        /* search ITLB */
        for (i = 0; i < ITLB_SIZE; i++) {
            tlb_t * entry = &s->itlb[i];
            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                if (utlb_match_entry)
                    *entry = *utlb_match_entry;
                else
                    entry->v = v;
                break;
            }
        }

        if (needs_tlb_flush) {
            tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10);
        }

    } else {
        int index = (addr & 0x00003f00) >> 8;
        tlb_t * entry = &s->utlb[index];
        if (entry->v) {
            CPUState *cs = CPU(sh_env_get_cpu(s));

            /* Overwriting valid entry in utlb. */
            target_ulong address = entry->vpn << 10;
            tlb_flush_page(cs, address);
        }
        entry->asid = asid;
        entry->vpn = vpn;
        entry->d = d;
        entry->v = v;
        increment_urc(s);
    }
}

uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s,
                                       hwaddr addr)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00003f00) >> 8;
    tlb_t * entry = &s->utlb[index];

    increment_urc(s); /* per utlb access */

    if (array == 0) {
        /* UTLB Data Array 1 */
        return (entry->ppn << 10) |
               (entry->v   <<  8) |
               (entry->pr  <<  5) |
               ((entry->sz & 1) <<  6) |
               ((entry->sz & 2) <<  4) |
               (entry->c   <<  3) |
               (entry->d   <<  2) |
               (entry->sh  <<  1) |
               (entry->wt);
    } else {
        /* UTLB Data Array 2 */
        return (entry->tc << 1) |
               (entry->sa);
    }
}

void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00003f00) >> 8;
    tlb_t * entry = &s->utlb[index];

    increment_urc(s); /* per utlb access */

    if (array == 0) {
        /* UTLB Data Array 1 */
        if (entry->v) {
            /* Overwriting valid entry in utlb. */
            target_ulong address = entry->vpn << 10;
            tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
        }
        entry->ppn = (mem_value & 0x1ffffc00) >> 10;
        entry->v   = (mem_value & 0x00000100) >> 8;
        entry->sz  = (mem_value & 0x00000080) >> 6 |
                     (mem_value & 0x00000010) >> 4;
        entry->pr  = (mem_value & 0x00000060) >> 5;
        entry->c   = (mem_value & 0x00000008) >> 3;
        entry->d   = (mem_value & 0x00000004) >> 2;
        entry->sh  = (mem_value & 0x00000002) >> 1;
        entry->wt  = (mem_value & 0x00000001);
    } else {
        /* UTLB Data Array 2 */
        entry->tc = (mem_value & 0x00000008) >> 3;
        entry->sa = (mem_value & 0x00000007);
    }
}

int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
{
    int n;
    int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));

    /* check area */
    if (env->sr & (1u << SR_MD)) {
        /* For privileged mode, P2 and P4 area is not cacheable. */
        if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr)
            return 0;
    } else {
        /* For user mode, only U0 area is cacheable. */
        if (0x80000000 <= addr)
            return 0;
    }

    /*
     * TODO : Evaluate CCR and check if the cache is on or off.
     *        Now CCR is not in CPUSH4State, but in SH7750State.
     *        When you move the ccr into CPUSH4State, the code will be
     *        as follows.
     */
#if 0
    /* check if operand cache is enabled or not. */
    if (!(env->ccr & 1))
        return 0;
#endif

    /* if MMU is off, no check for TLB. */
    if ((env->mmucr & MMUCR_AT) == 0)
        return 1;

    /* check TLB */
    n = find_tlb_entry(env, addr, env->itlb, ITLB_SIZE, use_asid);
    if (n >= 0)
        return env->itlb[n].c;

    n = find_tlb_entry(env, addr, env->utlb, UTLB_SIZE, use_asid);
    if (n >= 0)
        return env->utlb[n].c;

    return 0;
}

#endif

bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        SuperHCPU *cpu = SUPERH_CPU(cs);
        CPUSH4State *env = &cpu->env;

        /* Delay slots are indivisible, ignore interrupts */
        if (env->flags & DELAY_SLOT_MASK) {
            return false;
        } else {
            superh_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}