/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "system/tcg.h"
#include "cpu.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/target_page.h"
#include "internals.h"
#include "cpu-csr.h"
#include "cpu-mmu.h"
#include "tcg/tcg_loongarch.h"

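/*
 * Return the directory index base (shift) and width (in bits) for the
 * given page table level, as configured in the CSR.PWCL and CSR.PWCH
 * page walk controller registers.  Level 0 (the default case) denotes
 * the last level page table itself, described by PTBASE/PTWIDTH.
 */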
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                        uint64_t *dir_width, unsigned int level)
{
    switch (level) {
    case 1:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
        break;
    case 2:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
        break;
    case 3:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
        break;
    case 4:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
        break;
    default:
        /* level may be zero for ldpte */
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
        break;
    }
}

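/*
 * Check a pte against the requested access type and privilege level.
 * On TLBRET_MATCH the physical address, protection bits and mmu index
 * are filled into the context; otherwise the TLB fault to be raised
 * is returned (invalid, execute/read inhibit, privilege or dirty).
 */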
TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
                           MMUAccessType access_type, int mmu_idx)
{
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
    bool tlb_v, tlb_d;

    tlb_entry = context->pte;
    tlb_ps = context->ps;
    tlb_v = pte_present(env, tlb_entry);
    tlb_d = pte_write(env, tlb_entry);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Clear the software-used bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1));

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    context->physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                        (context->addr & MAKE_64BIT_MASK(0, tlb_ps));
    context->prot = PAGE_READ;
    context->mmu_index = tlb_plv;
    if (tlb_d) {
        context->prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        context->prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

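/*
 * Atomically update a pte in guest memory: compare-and-swap the
 * 64-bit little-endian value at physical address phys from old to
 * new.  Returns MEMTX_OK on success, MEMTX_DECODE_ERROR when another
 * writer changed the pte first, and MEMTX_ACCESS_ERROR when the pte
 * does not live in RAM and so cannot be updated atomically.
 */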
static MemTxResult loongarch_cmpxchg_phys(CPUState *cs, hwaddr phys,
                                          uint64_t old, uint64_t new)
{
    hwaddr addr1, l = 8;
    MemoryRegion *mr;
    uint8_t *ram_ptr;
    uint64_t old1;
    MemTxResult ret;

    rcu_read_lock();
    mr = address_space_translate(cs->as, phys, &addr1, &l,
                                 false, MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        /*
         * Misconfigured PTE in ROM (the A/D bits are not preset), or
         * the PTE is in I/O space and can't be updated atomically.
         */
        rcu_read_unlock();
        return MEMTX_ACCESS_ERROR;
    }

    ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
    old1 = qatomic_cmpxchg((uint64_t *)ram_ptr, cpu_to_le64(old),
                           cpu_to_le64(new));
    old1 = le64_to_cpu(old1);
    if (old1 == old) {
        ret = MEMTX_OK;
    } else {
        ret = MEMTX_DECODE_ERROR;
    }
    rcu_read_unlock();

    return ret;
}

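/*
 * Software page table walker.  Starting from CSR.PGDL or CSR.PGDH
 * (selected by address bit 63), walk the directory levels described
 * by the page walk controller registers down to the final pte,
 * stopping early at a huge pte.  The pte is then checked, and when
 * hardware page table walking is supported the A/D bits are set in
 * memory with an atomic cmpxchg so that concurrent walks on other
 * vCPUs stay consistent.
 */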
TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
                     int access_type, int mmu_idx, int debug)
{
    CPUState *cs = env_cpu(env);
    target_ulong index = 0, phys = 0;
    uint64_t dir_base, dir_width;
    uint64_t base, pte;
    int level;
    vaddr address;
    TLBRet ret;
    MemTxResult ret1;

    address = context->addr;
    if ((address >> 63) & 0x1) {
        base = env->CSR_PGDH;
    } else {
        base = env->CSR_PGDL;
    }
    base &= TARGET_PHYS_MASK;

    for (level = 4; level >= 0; level--) {
        get_dir_base_width(env, &dir_base, &dir_width, level);

        if (dir_width == 0) {
            continue;
        }

        /* Get the next level page directory */
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | index << 3;
        base = ldq_phys(cs->as, phys);
        if (level) {
            if (FIELD_EX64(base, TLBENTRY, HUGE)) {
                /* base is a huge pte */
                index = 0;
                dir_base -= 1;
                break;
            } else {
                /* Discard the high bits of the page directory entry */
                base &= TARGET_PHYS_MASK;
            }
        }
    }

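    /*
     * A TLB entry covers an even/odd pair of pages, so both buddy
     * ptes are recorded below: a huge pte is split into two halves
     * of 2^dir_base bytes each, while for a level-0 pte the
     * neighbouring entry is loaded from the page table.  The restart
     * label is taken again when the A/D update below loses the
     * cmpxchg race against another vCPU.
     */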
restart:
    /* pte */
    pte = base;
    if (level > 0) {
        /* Huge page: base is the pte itself */
        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        context->pte_buddy[index] = base;
        context->pte_buddy[1 - index] = base + BIT_ULL(dir_base);
        base += (BIT_ULL(dir_base) & address);
    } else if (cpu_has_ptw(env)) {
        index &= 1;
        context->pte_buddy[index] = base;
        context->pte_buddy[1 - index] = ldq_phys(cs->as,
                                            phys + 8 * (1 - 2 * index));
    }

    context->ps = dir_base;
    context->pte = base;
    ret = loongarch_check_pte(env, context, access_type, mmu_idx);
    if (debug) {
        return ret;
    }

    /*
     * Update the A/D bits when hardware PTW is supported.
     *
     * The pte update needs an atomic cmpxchg operation, since other
     * vCPUs may update the pte at the same time.
     */
    if (ret == TLBRET_MATCH && cpu_has_ptw(env)) {
        if (access_type == MMU_DATA_STORE && pte_dirty(base)) {
            return ret;
        }

        if (access_type != MMU_DATA_STORE && pte_access(base)) {
            return ret;
        }

        base = pte_mkaccess(pte);
        if (access_type == MMU_DATA_STORE) {
            base = pte_mkdirty(base);
        }
        ret1 = loongarch_cmpxchg_phys(cs, phys, pte, base);
        /* The pte was updated by another vCPU, reload it */
        if (ret1 == MEMTX_DECODE_ERROR) {
            base = ldq_phys(cs->as, phys);
            goto restart;
        }

        base = context->pte_buddy[index];
        base = pte_mkaccess(base);
        if (access_type == MMU_DATA_STORE) {
            base = pte_mkdirty(base);
        }
        context->pte_buddy[index] = base;

        /*
         * The A/D bits need to be updated in both the even and odd
         * pages of a huge pte.
         */
        if (level > 0) {
            index = 1 - index;
            base = context->pte_buddy[index];
            base = pte_mkaccess(base);
            if (access_type == MMU_DATA_STORE) {
                base = pte_mkdirty(base);
            }
            context->pte_buddy[index] = base;
        }
    }

    return ret;
}

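/*
 * Translate a mapped address: consult the TLB first under TCG, and
 * fall back to a page table walk only for debugger accesses, where a
 * legal mapping should be honoured even when it is not yet in the
 * TLB.
 */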
static TLBRet loongarch_map_address(CPULoongArchState *env,
                                    MMUContext *context,
                                    MMUAccessType access_type, int mmu_idx,
                                    int is_debug)
{
    TLBRet ret;

    if (tcg_enabled()) {
        ret = loongarch_get_addr_from_tlb(env, context, access_type, mmu_idx);
        if (ret != TLBRET_NOMATCH) {
            return ret;
        }
    }

    if (is_debug) {
        /*
         * For debugger memory access, we want to do the map when there is a
         * legal mapping, even if the mapping is not yet in the TLB.  Return
         * 0 if there is a valid map, else non-zero.
         */
        return loongarch_ptw(env, context, access_type, mmu_idx, is_debug);
    }

    return TLBRET_NOMATCH;
}

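/*
 * Translate a virtual address that hit a direct map window.  On la64
 * the physical address is the virtual address with the window segment
 * bits masked off; on la32 the PSEG field of the window CSR supplies
 * the physical segment.
 */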
static hwaddr dmw_va2pa(CPULoongArchState *env, vaddr va, target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

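/*
 * Top level address translation, tried in the architectural order:
 * direct address translation when DA=1 and PG=0, then the four direct
 * map windows, and finally mapped address translation through the TLB
 * or a page table walk.
 */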
TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
                            MMUAccessType access_type, int mmu_idx,
                            int is_debug)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);
    vaddr address;

    /* Check PG and DA */
    address = context->addr;
    if (da && !pg) {
        context->physical = address & TARGET_PHYS_MASK;
        context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        context->mmu_index = MMU_DA_IDX;
        return TLBRET_MATCH;
    }

    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check the direct map windows */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            context->physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            context->mmu_index = MMU_DA_IDX;
            return TLBRET_MATCH;
        }
    }

    /* Check that the address is a properly sign-extended virtual address */
    addr_high = (int64_t)address >> (TARGET_VIRT_ADDR_SPACE_BITS - 1);
    if (!(addr_high == 0 || addr_high == -1ULL)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, context, access_type, mmu_idx, is_debug);
}

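/*
 * Debug translation hook, used e.g. for gdbstub memory accesses;
 * returns -1 when the address has no valid mapping.
 */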
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPULoongArchState *env = cpu_env(cs);
    MMUContext context;

    context.addr = addr;
    if (get_physical_address(env, &context, MMU_DATA_LOAD,
                             cpu_mmu_index(cs, false), 1) != TLBRET_MATCH) {
        return -1;
    }
    return context.physical;
}