xref: /openbmc/qemu/target/hppa/mem_helper.c (revision d525f73f)
/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

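/* Scan the architectural TLB copy for an entry whose virtual address
   range covers ADDR.  Returns the entry, valid or not, or NULL.  */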
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

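/* Invalidate ENT: flush every QEMU page it covers from the virtual MMU
   indexes (MMU_PHYS_IDX is untouched), then clear the entry itself.  */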
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = env_cpu(env);
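    /* page_size is a power-of-4 exponent: the entry spans 4**page_size
       TARGET_PAGE_SIZE pages.  */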
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

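/* Evict and return the next TLB slot, chosen in round-robin order.  */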
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

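/* Translate ADDR for an access of TYPE at privilege level MMU_IDX.
   Returns -1 on success or the EXCP_* fault number to raise; *PPHYS
   and *PPROT are filled in either way.  */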
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
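    /* mmu_idx 0..3 mirror privilege levels 0 (most privileged) through 3,
       so a numerically smaller index is never granted less access.  */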
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;
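        /* E.g. access_id 0x5 gives match 0xb: the id in bits [31:1]
           with the write-disable bit 0 set.  */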

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

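/* Debug-mode translation, used by the gdbstub and monitor; it must not
   change cpu state or raise exceptions.  */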
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

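/* TCG softmmu hook: translate ADDR and install one page into the QEMU
   TLB, or raise the architectural fault (unless this is only a probe).  */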
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
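    /* REG bits [24:5] hold the 20-bit physical page number.  */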
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

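    /* Unpack the PA 1.1 protection word into the entry's fields.  */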
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    trace_hppa_tlb_ptlb(env);

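    /* Queue the purge asynchronously on every other vcpu, and run it as
       "safe work" on this one, which waits until no other vcpu is
       executing before the flush proceeds.  */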
    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(env_cpu(env), 0xf);
}

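/* The QEMU TLB entries installed above bake in the protection-id check,
   so a PID change must flush them while PSW_P checking is enabled.  */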
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    if (env->psw & PSW_P) {
        tlb_flush_by_mmuidx(env_cpu(env), 0xf);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

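/* Load Physical Address: translate ADDR without an access-type check;
   a translation miss raises the non-access data TLB miss fault.  */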
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB entry at VADDR, or -1 if none.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}