/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for QEMU
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-csr.h"
#ifdef CONFIG_TCG
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

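    /*
     * MTLB entries (index >= LOONGARCH_STLB) carry a per-entry page
     * size; STLB entries all share the fixed size from CSR_STLBPS.
     */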
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /*
     * Remove the software-usable bits between bit 12 and bit PS of the
     * PPN: e.g. with 16KiB pages (tlb_ps = 14), physical address bits
     * [13:12] fall inside the page offset and may hold software values.
     */
    tlb_ppn = tlb_ppn & ~((0x1UL << (tlb_ps - 12)) - 1);

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

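    /*
     * Form the physical address from the PPN and the in-page offset;
     * map a clean page (D == 0) without PAGE_WRITE so that later
     * stores refault and can raise the dirty exception above.
     */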
    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

/*
 * One TLB entry holds an adjacent odd/even page pair, so its vpn is
 * the virtual page number divided by 2: for a 16KiB page the vpn to
 * compare is bits [47:15], while the VPPN field of the TLB entry
 * contains bits [47:13], so an adjustment is needed.
 * virt_vpn = vaddr[47:13]
 */
bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                          int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[22:15] <==> TLBIDX.index for 16KiB Page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
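    /*
     * Each entry maps an even/odd pair, hence the stlb_ps + 1 shift;
     * VPPN stores the address from bit R_TLB_MISC_VPPN_SHIFT upward,
     * so shift it right by compare_shift before comparing with vpn.
     */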

    /* Search STLB: 8 ways of 256 sets each */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB: fully associative, with per-entry page size */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}
#else
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    return TLBRET_NOMATCH;
}
#endif

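/*
 * Translate a virtual address through a direct map window: on LA64 the
 * low virtual address bits map straight to the physical address, while
 * on LA32 the VSEG bits are replaced by the window's PSEG field.
 */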
static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA: direct address translation when DA=1 and PG=0 */
    if (da && !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
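    /*
     * plv is a one-hot mask: bit 0 for PLV0 (kernel), bit 3 for PLV3
     * (user), matching the per-PLV enable bits of each CSR.DMW window.
     */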
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map windows */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /*
     * Check address extension: bits above the virtual address space
     * must be a sign extension of the topmost implemented bit.
     */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address: translate through the TLB */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx);
}

hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(cs, false)) != 0) {
        return -1;
    }
    return phys_addr;
}