/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"
#include "tcg/tcg_loongarch.h"

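/* A page size is valid only if its bit is set in the CSR_PRCFG2 mask. */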
bool check_ps(CPULoongArchState *env, uint8_t tlb_ps)
{
    if (tlb_ps >= 64) {
        return false;
    }
    return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}

static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

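    /*
     * A TLB refill (no match) records the fault in the refill CSRs;
     * all other faults record it in CSR_BADV (unless CSR_DBG.DST is
     * set) and CSR_TLBEHI.
     */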
    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

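/*
 * Flush from QEMU's softmmu TLB the even and odd pages mapped by a
 * guest TLB entry that is still marked existent.
 */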
static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
    uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        return;
    }
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

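/*
 * Flush the softmmu mappings of a TLB entry that is about to be
 * overwritten, but only if it can be live for the current ASID
 * (global entries always can).
 */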
static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

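/*
 * Write the CSR state into the selected TLB entry.  While a TLB refill
 * exception is being serviced (TLBRERA.ISTLBR set) the TLBREHI/TLBRELO*
 * CSRs are used, otherwise the regular TLBEHI/TLBELO* CSRs.
 */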
static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    /* Only MTLB entries have the PS field */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

/*
 * One TLB entry holds an adjacent odd/even page pair, so the vpn used
 * for comparison is the virtual page number divided by 2: bits [47:15]
 * for a 16KiB page.  The vppn field in the TLB entry, however, contains
 * bits [47:13] (virt_vpn = vaddr[47:13]), so it must be shifted down
 * before the comparison.
 */
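/*
 * Worked example with 16KiB pages (stlb_ps = 14): vpn = vaddr >> 15 and
 * tlb_vppn = vaddr >> 13, so compare_shift = 14 + 1 - 13 = 2 and the
 * match test below compares vaddr[47:15] on both sides.
 */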
static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                                 int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

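/*
 * TLBSRCH: look up CSR_TLBEHI (or CSR_TLBREHI during a refill) and report
 * the matching entry in CSR_TLBIDX, or set TLBIDX.NE when nothing matches.
 */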
void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

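/*
 * TLBWR: write the entry selected by TLBIDX.INDEX.  If TLBIDX.NE is set
 * the entry is only marked non-existent, otherwise it is refilled from
 * the CSRs.
 */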
void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

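/*
 * TLBFILL: write a randomly chosen entry.  If the page size matches
 * CSR_STLBPS, a random way of the STLB set selected by the VA is used,
 * otherwise a random MTLB entry.
 */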
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        /* Validity of pagesize is checked in helper_ldpte() */
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        /* Validity of pagesize is checked in helper_tlbrd() */
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    /* Validity of stlb_ps is checked in helper_csrwr_stlbps() */
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

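/*
 * TLBCLR: mark non-existent the entries whose ASID matches CSR_ASID and
 * whose G bit is clear, in either the STLB line selected by TLBIDX.INDEX
 * or the whole MTLB.
 */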
void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

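/*
 * TLBFLUSH: like TLBCLR, but mark the selected entries non-existent
 * regardless of ASID or G bit.
 */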
void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

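/*
 * INVTLB helpers: each variant marks the matching guest TLB entries
 * non-existent and then flushes the whole QEMU softmmu TLB.
 */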
void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx, 0);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

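/*
 * LDDIR: walk one level of the page table for the refill bad address.
 * A huge-page directory entry is returned as-is (tagged with its level)
 * instead of being dereferenced.
 */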
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys;
    uint64_t dir_base, dir_width;

    if (unlikely((level == 0) || (level > 4))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
            return base;
        }

        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

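    /*
     * Index this directory level with the bad-address bits selected by
     * get_dir_base_width(); each directory entry is 8 bytes wide.
     */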
    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;
    get_dir_base_width(env, &dir_base, &dir_width, level);
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << 3;
    return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
}

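/*
 * LDPTE: load the even/odd PTE pair for the refill bad address (or split a
 * huge-page entry in two) into CSR_TLBRELO0/1 and record the page size in
 * CSR_TLBREHI.PS.
 */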
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, badv;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;
    uint8_t ps;

    /*
     * The "base" parameter is one of two things: a page table base
     * address, whose bit 6 is 0, or a huge page entry, whose bit 6 is 1.
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Get the huge page level and size, clear the level information
         * and the huge page bit in the entry, and move the HGLOBAL bit
         * to the GLOBAL bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        ps = dir_base + dir_width - 1;
        /*
         * A huge page is split into an even/odd pair of half-size pages
         * when it is loaded into the TLB, so the TLB page size is the
         * huge page size divided by 2.
         */
        tmp0 = base;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }

        if (!check_ps(env, ps)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Illegal huge pagesize %d\n", ps);
            return;
        }
    } else {
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1; /* clear bit 0 */
        ptoffset0 = ptindex << 3;
        ptoffset1 = (ptindex + 1) << 3;
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}

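/*
 * Translate 'address' through the matched TLB entry: pick the even or odd
 * half, check the V/NX/NR/PLV/D bits against the access, and produce the
 * physical address and page protection flags.
 */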
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Remove software bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1));

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical,
                                int *prot, target_ulong address,
                                MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}