/*
 * MIPS TLB (Translation lookaside buffer) helpers.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"

#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "exec/helper-proto.h"

/* TLB management */
static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

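/*
 * Extract the physical frame number from an EntryLo register value.  The
 * PFN field starts at bit 6; on 32-bit targets the extended physical
 * address bits (PFNX, used with XPA) come from EntryLo bits 32..63 and
 * are folded in above the 24-bit PFN.
 */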
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}

static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise an MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}

static void r4k_helper_tlbinv(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

static void r4k_helper_tlbinvf(CPUMIPSState *env)
{
    int idx;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
    cpu_mips_tlb_flush(env);
}

static void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}

static void r4k_helper_tlbwr(CPUMIPSState *env)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(env, r);
}

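/*
 * TLBP: probe the TLB for an entry matching EntryHi.  On a hit the entry's
 * index is written to CP0_Index; on a miss the P bit (bit 31) of CP0_Index
 * is set instead, and any matching shadow entries QEMU keeps beyond the
 * architected TLB are discarded.
 */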
static void r4k_helper_tlbp(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /* Check ASID/MMID, virtual page number & size */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

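/*
 * Inverse of get_tlb_pfn_from_entrylo(): rebuild the EntryLo PFN (and, on
 * 32-bit targets, PFNX) fields from the frame number stored in a TLB entry.
 */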
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}

static void r4k_helper_tlbr(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB. */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                            ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                            ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                            get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                            ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                            ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                            get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}

void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}

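/*
 * GINVT support: invalidate the entries of one CPU's TLB that match the
 * broadcast invalidation message.  Which entries are selected depends on
 * the invalidation type (all / by VA / by MMID / by VA+MMID).
 */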
static void global_invalidate_tlb(CPUMIPSState *env,
                                  uint32_t invMsgVPN2,
                                  uint8_t invMsgR,
                                  uint32_t invMsgMMid,
                                  bool invAll,
                                  bool invVAMMid,
                                  bool invMMid,
                                  bool invVA)
{

    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}

/* no MMU emulation */
static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                              target_ulong address, MMUAccessType access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

/* fixed mapping MMU emulation */
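/*
 * In fixed-mapping (FMT) mode there is no TLB: useg is remapped to physical
 * 0x40000000 (or mapped 1:1 when Status.ERL is set), kseg0/kseg1 are
 * stripped down to their physical offset, and everything else is
 * identity-mapped.
 */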
static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

/* MIPS32/MIPS64 R4000-style MMU emulation */
static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, MMUAccessType access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID/MMID, virtual page number & size */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
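            /*
             * Each TLB entry maps a pair of pages; the bit just above the
             * single-page offset selects the odd (n = 1) or even (n = 0)
             * half of the pair.
             */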
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}

static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &no_mmu_map_address;
}

static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &fixed_mmu_map_address;
}

static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
    env->tlb->map_address = &r4k_map_address;
    env->tlb->helper_tlbwi = r4k_helper_tlbwi;
    env->tlb->helper_tlbwr = r4k_helper_tlbwr;
    env->tlb->helper_tlbp = r4k_helper_tlbp;
    env->tlb->helper_tlbr = r4k_helper_tlbr;
    env->tlb->helper_tlbinv = r4k_helper_tlbinv;
    env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
}

void mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));

    switch (def->mmu_type) {
    case MMU_TYPE_NONE:
        no_mmu_init(env, def);
        break;
    case MMU_TYPE_R4000:
        r4k_mmu_init(env, def);
        break;
    case MMU_TYPE_FMT:
        fixed_mmu_init(env, def);
        break;
    case MMU_TYPE_R3000:
    case MMU_TYPE_R6000:
    case MMU_TYPE_R8000:
    default:
        cpu_abort(env_cpu(env), "MMU type not supported\n");
    }
}

void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (access_type == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
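    /*
     * Record the faulting page pair in CP0_Context (and, on 64-bit targets,
     * CP0_XContext) so a software refill handler can index its page table;
     * the PTEBase bits are left untouched.
     */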
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
        (extract64(address, 62, 2) << (env->SEGBITS - 9)) |     /* R */
        (extract64(address, 13, env->SEGBITS - 13) << 4);       /* BadVPN2 */
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}

#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions.  When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is caused by the AddressTranslation or
 * LoadMemory functions, that exception is not taken; a silent exit is
 * taken instead, resulting in a TLB or XTLB Refill exception.
 */

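/*
 * Load one PTE of the given size for the page table walker.  A misaligned
 * address makes the load fail, which the caller treats as a silent exit
 * from the walk.
 */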
static bool get_pte(CPUMIPSState *env, uint64_t vaddr, MemOp op,
                    uint64_t *pte, unsigned ptw_mmu_idx)
{
    MemOpIdx oi;

    if ((vaddr & (memop_size(op) - 1)) != 0) {
        return false;
    }

    oi = make_memop_idx(op | MO_TE, ptw_mmu_idx);
    if (op == MO_64) {
        *pte = cpu_ldq_mmu(env, vaddr, oi, 0);
    } else {
        *pte = cpu_ldl_mmu(env, vaddr, oi, 0);
    }

    return true;
}

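/*
 * Rearrange a PTE loaded from memory into EntryLo layout: the two RI/XI
 * bits stored just below the bit position selected by PTEI are moved up to
 * the architectural XI/RI positions at the top of EntryLo, and the
 * remaining bits are shifted down in their place.
 */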
static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
                                     MemOp op, int ptei)
{
    unsigned entry_size = memop_size(op) << 3;
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}

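/*
 * Walk one directory level.  Returns 0 on a failed access (silent exit
 * from the walk), 1 when a huge-page leaf was found and pw_entrylo0/1 have
 * been filled in, or 2 when *vaddr now points at the next level.
 */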
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1,
        MemOp directory_mop, MemOp leaf_mop, int ptw_mmu_idx)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             ptw_mmu_idx) != TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, directory_mop, &entry, ptw_mmu_idx)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = BIT_ULL(w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_mop;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     ptw_mmu_idx) != TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leaf_mop, &entry, ptw_mmu_idx)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}

static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
                                   int ptw_mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /* For 32-bit architectures, this bit is fixed to 0. */
    MemOp native_op = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? MO_32 : MO_64;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    MemOp directory_mop, leaf_mop;

    /* Offsets into tables */
    unsigned goffset, uoffset, moffset, ptoffset0, ptoffset1;

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    int m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if (ptew > 1) {
        return false;
    }

    /* HTW Shift values (depend on entry size) */
    directory_mop = (hugepg && (ptew == 1)) ? native_op + 1 : native_op;
    leaf_mop = (ptew == 1) ? native_op + 1 : native_op;

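    /*
     * Byte offsets of the selected entry within each directory level and of
     * the even/odd halves of the leaf-level PTE pair.
     */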
    goffset = gindex << directory_mop;
    uoffset = uindex << directory_mop;
    moffset = mindex << directory_mop;
    ptoffset0 = (ptindex >> 1) << (leaf_mop + 1);
    ptoffset1 = ptoffset0 | (1 << (leaf_mop));

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_mop, leaf_mop, ptw_mmu_idx))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_mop, leaf_mop, ptw_mmu_idx))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_mop, leaf_mop, ptw_mmu_idx))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ptw_mmu_idx) != TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ptw_mmu_idx) != TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:

    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit)
        {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> TARGET_PAGE_BITS_MIN;
    update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
#endif

bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    CPUMIPSState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret = TLBRET_BADADDR;

    /* data access */
    /* XXX: put correct access by using cpu_restore_state() correctly */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int ptw_mmu_idx = (env->hflags & MIPS_HFLAG_ERL ?
                           MMU_ERL_IDX : MMU_KERNEL_IDX);

        if (page_table_walk_refill(env, address, ptw_mmu_idx)) {
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        return false;
    }

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}

hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  MMUAccessType access_type, uintptr_t retaddr)
{
    hwaddr physical;
    int prot;
    int ret = 0;
    CPUState *cs = env_cpu(env);

    /* data access */
    ret = get_physical_address(env, &physical, &prot, address, access_type,
                               mips_env_mmu_index(env));
    if (ret == TLBRET_MATCH) {
        return physical;
    }

    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

static void set_hflags_for_handler(CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode. */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose. */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3 &
                           (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}

static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}

void mips_cpu_do_interrupt(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC,
                 mips_exception_name(cs->exception_index));
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_SEMIHOST:
        cs->exception_index = EXCP_NONE;
        mips_semihosting(env);
        env->active_tc.PC += env->error_code;
        return;
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /*
         * Debug single step cannot be raised inside a delay slot and
         * resume will always occur on the next instruction
         * (but we assume the pc has always been updated during
         * code translation).
         */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
    enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
    set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /*
                     * For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.
                     */
                    vector = pending;
                } else {
                    /*
                     * Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts.
                     */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
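                /* Vectors are spaced IntCtl.VS * 32 bytes apart. */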
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
    set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS_R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
    cs->exception_index = EXCP_NONE;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        CPUMIPSState *env = cpu_env(cs);

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    target_ulong mask;

    MMID = mi ? MMID : (uint32_t) ASID;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID/MMID changes, so no need to
     * flush these entries again.
     */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    if (tlb->G == 0 && tlb_mmid != MMID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can shadow the discarded entry into
         * a new (fake) TLB entry, as long as the guest can not
         * tell that it's there.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}