/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"

#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
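/* Look up ADDR in the architectural TLB and return the matching entry,
   or NULL if no entry covers the address.  */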
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            return ent;
        }
    }
    return NULL;
}

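/* Invalidate ENT: flush every page it covers from the QEMU softmmu
   TLBs (except the physical index) and clear the architectural entry.  */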
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

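/* Claim the next TLB slot in round-robin order, flushing whatever
   was there before, and return it for reuse.  */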
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

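/* Translate ADDR for an access of TYPE (a PAGE_* mask, or 0 for a
   non-architectural access) at privilege MMU_IDX.  Fill in *PPHYS and
   *PPROT, and return -1 on success or the EXCP_* fault to raise.  */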
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        /* ??? Unconditionally report data tlb miss,
           even if this is an instruction fetch.  */
        ret = EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  Lower numbered
       privilege levels are more privileged, so read is allowed when
       MMU_IDX is at most the PL1 bound, write when it is at most the
       PL2 bound, and execute when it lies between the two.  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* ??? Check PSW_P and ent->access_prot.  This can remove PAGE_WRITE.  */

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP :
               prot & PAGE_READ ? EXCP_DMP : EXCP_DMAR);
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

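/* Debug (gdbstub/monitor) translation hook: return the physical
   address for ADDR, or -1 if no translation exists.  */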
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

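/* Softmmu slow-path hook, called when the QEMU TLB has no translation
   for ADDR.  Either installs one via tlb_set_page, or raises the
   architectural fault and does not return.  */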
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (!ent->entry_valid) {
            empty = ent;
        } else if (ent->va_b <= addr && addr <= ent->va_e) {
            hppa_flush_tlb_ent(env, ent);
            empty = ent;
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL || ent->entry_valid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

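/* Queue the purge as async work on every other vcpu, and as "safe"
   work on this one so that it runs with the other cpus quiesced.  */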
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(src, 0xf);
}

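/* Load Physical Address.  Translate ADDR without access checks; on
   failure raise the fault, converting a plain data TLB miss into its
   non-access variant.  */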
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        hppa_dynamic_excp(env, excp, GETPC());
    }
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */