/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

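/*
 * Return the architectural TLB entry covering ADDR, looked up in the
 * per-cpu interval tree, or NULL if there is none.
 */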
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

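/*
 * Flush the QEMU TLB for the range covered by ENT and retire the entry.
 * BTLB entries are only retired when FORCE_FLUSH_BTLB is set; retired
 * dynamic (non-BTLB) entries are returned to the unused list.
 */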
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

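/*
 * Flush all TLB entries that overlap the virtual range [VA_B, VA_E].
 * BTLB entries within the range are flushed but remain installed.
 */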
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

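/*
 * Allocate a TLB entry: take one from the unused list if available,
 * otherwise recycle the dynamic (non-BTLB) entries in round-robin order.
 */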
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

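/*
 * Translate ADDR for an access of TYPE at the privilege level implied by
 * MMU_IDX.  *PPHYS and *PPROT are always filled in; the return value is
 * -1 on success, or the EXCP_* code that the caller should raise.
 */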
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

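/*
 * Record the faulting virtual address in the ISR/IOR interruption
 * registers, provided PSW_Q permits them to be updated.
 */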
void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                " while accessing I/O at %#08" HWADDR_PRIx "\n",
                env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

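/*
 * Fill in the PA 1.1 protection and access-rights fields of ENT from
 * the protection word REG, then mark the entry valid and make it
 * visible in the interval tree.
 */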
static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

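/*
 * Insert a PA 2.0 TLB entry.  The low 4 bits of R1 encode the page size
 * as TARGET_PAGE_SIZE << (2 * (r1 & 0xf)), so e.g. a value of 1 covers
 * four pages; R1 also supplies the physical address bits and R2 the
 * access rights and access id.
 */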
static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

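/*
 * Discard all dynamic TLB entries, rebuild the interval tree so that it
 * contains only the still-valid BTLB entries, and flush the QEMU TLB.
 */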
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

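/*
 * The protection ID registers have changed; flush the QEMU TLB for the
 * mmu indexes whose translations are subject to protection-ID checks.
 */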
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

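/*
 * Load Physical Address: translate ADDR through the kernel data mmu and
 * return the physical address, converting a data TLB miss into its
 * non-access variant before raising the exception.
 */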
target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}
755