xref: /openbmc/qemu/target/hppa/mem_helper.c (revision e0c7de8d)
/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

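/*
 * Linear scan of the unified software TLB (the BTLB slots first, then the
 * dynamically allocated entries) for an entry whose [va_b, va_e] range
 * covers ADDR.  Returns NULL when nothing matches; callers must still
 * check entry_valid before trusting the translation.
 */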
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

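/*
 * Invalidate the QEMU TLB pages backed by a software TLB entry and, unless
 * the entry is a BTLB slot (one of the first HPPA_BTLB_ENTRIES) and
 * FORCE_FLUSH_BTLB is clear, reset the entry itself so it can be reused.
 */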
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->va_b,
                              ent->va_e - ent->va_b + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so.  */
    if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
        return;
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

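/*
 * Choose a victim slot for a new (non-BTLB) translation.  tlb_last acts as
 * a simple round-robin cursor over the non-BTLB part of the array and is
 * reset whenever it falls outside that range.
 */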
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i;

    if (env->tlb_last < HPPA_BTLB_ENTRIES ||
        env->tlb_last >= ARRAY_SIZE(env->tlb)) {
        i = HPPA_BTLB_ENTRIES;
        env->tlb_last = HPPA_BTLB_ENTRIES + 1;
    } else {
        i = env->tlb_last;
        env->tlb_last++;
    }

    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent, false);
    return ent;
}

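/*
 * Core translation routine.  Returns -1 on success with *PPHYS and *PPROT
 * filled in, or an EXCP_* number describing the fault to raise.  TYPE is
 * the PAGE_* access being performed, or 0 for a non-architectural access
 * (e.g. a debugger probe) that bypasses the access-rights, protection-id
 * and D/B/T checks.
 */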
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              hppa_tlb_entry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    hppa_tlb_entry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->va_b);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

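/*
 * Debug translation hook, used for gdbstub/monitor physical-address
 * queries; a missing translation is reported as -1.
 */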
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

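/*
 * TCG tlb_fill hook.  On a successful walk the translation is installed in
 * the QEMU TLB with the page size taken from the architectural entry; on
 * failure we either return false (probe) or raise the indicated exception,
 * latching the faulting address into IOR/ISR when PSW_Q is set.
 */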
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    hppa_tlb_entry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx,
                 TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
    return true;
}

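/*
 * The ITLBA/ITLBP pair together build one software TLB entry: ITLBA records
 * the virtual range and physical page, ITLBP (below) then fills in the
 * access rights and marks the entry valid.
 */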
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent, false);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

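/*
 * Decode the protection word shared by ITLBP and the PDC BTLB call into the
 * access id, access rights and the B/D/T flags, and mark the entry valid.
 */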
static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent,
                            target_ureg reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    set_access_bits(env, ent, reg);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent, false);
    }
}

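/*
 * Broadcast the purge: queue ptlb_work on every other vcpu asynchronously
 * and run it on the requesting vcpu as "safe" work, i.e. while no other
 * vcpu is executing, so a stale translation cannot be in use concurrently.
 */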
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    trace_hppa_tlb_ptlb(env);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
    env->tlb_last = HPPA_BTLB_ENTRIES;
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

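/*
 * The outcome of the protection-id check is baked into the QEMU TLB entries
 * built above, so changing a PID register while PSW_P is enabled has to
 * throw those cached translations away.
 */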
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    if (env->psw & PSW_P) {
        tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

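/*
 * LPA (Load Physical Address): probe the data translation for ADDR and
 * return the physical address, raising the non-access TLB miss variant
 * when no mapping exists.
 */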
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    hppa_tlb_entry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;

#ifdef TARGET_HPPA64
    /* BTLBs are not supported on 64-bit CPUs */
    env->gr[28] = -1; /* nonexistent procedure */
    return;
#endif
    env->gr[28] = 0; /* PDC_OK */

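    /*
     * PDC-style register calling convention, as decoded below: gr[25]
     * selects the sub-option (0 = info, 1 = insert, 2 = purge one,
     * 3 = purge all), the remaining arguments arrive in gr[24]..gr[19],
     * and the status is returned in gr[28].
     */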
    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            /* force flush of possibly existing BTLB entry */
            hppa_flush_tlb_ent(env, btlb, true);
            /* create new BTLB entry */
            btlb->va_b = virt_page << TARGET_PAGE_BITS;
            btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}