xref: /openbmc/qemu/target/hppa/mem_helper.c (revision cbb45ff0)
/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

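/*
 * Return the first TLB entry whose range covers ADDR, or NULL if none
 * matches.  A matching entry may still have entry_valid clear, e.g.
 * between the ITLBA that creates it and the ITLBP that completes it.
 */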
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = env_cpu(env);
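    /* An entry covers 4^page_size consecutive base pages.  */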
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX; 0xf covers only the four
           privilege-level mmu indexes.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

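/*
 * Return the next TLB entry for reuse, evicting its old contents.
 * Entries are recycled round-robin via env->tlb_last.
 */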
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

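/*
 * Translate ADDR into a physical address and QEMU protection bits.
 * TYPE is the PAGE_* access being checked, or 0 for a non-architectural
 * access from within QEMU.  Returns -1 on success, or the EXCP_* fault
 * to raise on failure; *pphys and *pprot are set either way.
 */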
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
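    /* mmu_idx is the access privilege level, 0 most privileged: read
       requires privilege <= ar_pl1, write requires privilege <= ar_pl2,
       and execute requires ar_pl2 <= privilege <= ar_pl1.  */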
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

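/* Called by the core memory code on a QEMU TLB miss: translate ADDR and
   install the mapping, or (unless this is a non-faulting probe) raise
   the indicated architectural fault.  */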
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
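        /* With PSW_Q set, interruption state collection is enabled,
           so record the faulting address in the interruption offset
           and space registers.  */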
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
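    /* Bits [24:5] of the operand (LSB-first, as extract32 counts)
       hold the physical page number.  */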
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

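    /* Decode the PA 1.1 insert-protection word into the entry fields.  */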
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    trace_hppa_tlb_ptlb(env);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
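    /* Flush the local tlb as "safe" work, which runs once all other
       cpus have stopped.  */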
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(env_cpu(env), 0xf);
}

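/* A change to any of the CR_PID* registers can change the outcome of
   the access-id check made during translation, so discard the cached
   translations whenever protection-id checking is enabled.  */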
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    if (env->psw & PSW_P) {
        tlb_flush_by_mmuidx(env_cpu(env), 0xf);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

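/* Implement LPA (Load Physical Address): translate ADDR as a data
   reference, raising the fault (converted to its non-access form for
   a tlb miss) on failure.  */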
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
381