xref: /openbmc/qemu/target/hppa/mem_helper.c (revision 8aa2211e)
/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
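    /*
     * Illustration (for example, with a 40-bit implementation): sextract64
     * keeps bits 0..39 and sign-extends bit 39, so 0xfff0f00000 becomes
     * 0xfffffffff0f00000 and top-of-space absolute addresses remain at
     * the very top of the 64-bit hwaddr space.
     */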
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset equal to the 32-bit address, which is
         * what can be seen on physical machines as well.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
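    /*
     * Illustrative examples, assuming a 40-bit physical address space:
     *   0x12345678 -> 0x0000000012345678  (memory space, zero-extended)
     *   0xf1000000 -> 0xfffffffff1000000  (I/O space, sign-extended)
     *   0xf0001000 -> 0xfffffff0f0001000  (PDC space)
     */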
    return addr;
}

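/*
 * Find the TLB entry, if any, that covers ADDR.  Entries are kept in an
 * interval tree keyed on [itree.start, itree.last], so a single lookup
 * handles ordinary pages as well as larger BTLB/superpage ranges.
 */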
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

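/*
 * Allocate a TLB entry: take one from the unused list when possible,
 * otherwise recycle the non-BTLB slots round-robin via tlb_last.
 */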
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

#define ACCESS_ID_MASK 0xffff

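/*
 * Layout of a protection ID as held in CR_PID1..CR_PID4: bit 0 is the
 * write-disable (WD) bit, and bits 1..16 hold the ID proper, which must
 * equal the TLB entry's access_id for the match to succeed.
 */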
/* Return the set of protections allowed by a PID match. */
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

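/*
 * PA2.0 packs two protection IDs into each 64-bit PID register, one per
 * 32-bit half, so eight IDs are checked here rather than the four
 * checked by match_prot_id32 above.
 */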
static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

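/*
 * Translate ADDR for the given access TYPE (a PAGE_* bit, or 0 for a
 * non-architectural access such as debugger or LPA probing).  Returns -1
 * on success, with *pphys and *pprot filled in, or the EXCP_* number to
 * raise.  *pprot may be narrower than the TLB entry strictly allows, so
 * that the D, B and T conditions are re-checked on later accesses.
 */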
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }
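    /*
     * Worked example: ar_type 1 (read/write data) with ar_pl1 = ar_pl2 = 3
     * grants both read and write at every privilege level, since priv <= 3
     * always holds; with ar_pl2 = 0 the same page is writable only at the
     * most privileged level yet still readable by user code.
     */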

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. WD). */
        prot &= access_prot;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /*
     * In reverse priority order, check for conditions which raise faults.
     * As we go, remove PROT bits that cover the condition we want to check.
     * In this way, the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /*
     * If the (data) mmu is disabled, bypass translation.
     * ??? We really ought to know if the code mmu is disabled too,
     * in order to get the correct debugging dumps.
     */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /*
     * Since we're translating for debugging, the only error that is a
     * hard error is no translation at all.  Otherwise, while a real cpu
     * access might not have permission, the debugger does.
     */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these is described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
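                /*
                 * That is, the top two bits of the base register (bits
                 * 63:62 when PSW_W is set, bits 31:30 otherwise) end up
                 * in IOR bits 63:62.
                 */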
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                " while accessing I/O at %#08" HWADDR_PRIx "\n",
                env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

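    /*
     * The low four bits of r1 encode the page size as TARGET_PAGE_SIZE
     * times 4^n; with the usual 4 KiB base page that is 0 -> 4 KiB,
     * 1 -> 16 KiB, 2 -> 64 KiB, and so on.
     */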
    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
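    /*
     * For example, an encoded value of 3 flushes TARGET_PAGE_SIZE << 6,
     * i.e. 256 KiB with 4 KiB pages, starting at the page-aligned address.
     */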
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
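    /*
     * If the flush was broadcast, queue the local flush as "safe" work,
     * which runs once every vCPU has reached a synchronization point;
     * otherwise just do it inline.
     */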
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/*
 * Purge (Insn/Data) TLB entry.  This affects an implementation-defined
 * number of pages/entries (we choose all), and is local to the cpu.
 */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
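/*
 * gr[25] selects the sub-operation (0 = PDC_BTLB_INFO, 1 = insert,
 * 2 = purge one slot, 3 = purge all); the remaining arguments arrive
 * in gr[24]..gr[19] as used below.
 */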
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
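            /*
             * PDC_BTLB_INFO reply: minimum and maximum block size (in
             * pages), then the number of fixed and of variable BTLB
             * slots -- field meanings per the PDC document referenced
             * above.
             */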
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}