xref: /openbmc/qemu/target/hppa/mem_helper.c (revision 99367627)
/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

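/*
 * Rough sketch of the W=0 absolute map implemented below (the exact
 * physical address width is implementation-dependent, so the placement
 * of the relocated regions is illustrative only):
 *   0x00000000-0xefffffff  memory space, zero-extended
 *   0xf1000000-0xffffffff  I/O space, sign-extended to the very top
 *                          of the physical address space
 *   0xf0000000-0xf0ffffff  PDC space, relocated to the base of the
 *                          top 1/16th of the physical address space
 */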
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /* PDC address space */
        addr &= MAKE_64BIT_MASK(0, 24);
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

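/*
 * The per-cpu TLB is kept in an interval tree keyed by virtual address
 * range, so a degenerate [addr, addr] query finds the entry covering
 * ADDR regardless of its page size (including BTLB block entries).
 */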
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

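/*
 * Invalidate one hppa TLB entry: drop the covered range from the QEMU
 * softmmu TLB and, unless the entry is a BTLB slot being spared, remove
 * it from the interval tree and return it to the unused list.
 */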
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

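/*
 * Allocate a slot for a new TLB entry: reuse one from the unused list
 * when possible, otherwise evict the next non-BTLB slot in round-robin
 * order (env->tlb_last).
 */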
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

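/*
 * Translate ADDR for the given mmu context.  TYPE is the PAGE_* bit
 * required by the access, or 0 for a non-architectural (debugger) probe.
 * Returns -1 on success, with *pphys and *pprot filled in, or the EXCP_*
 * number of the fault to raise.
 */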
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
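    /*
     * PRIV is 0 at the most privileged level.  Read requires
     * priv <= PL1, write requires priv <= PL2, and execute requires
     * PL2 <= priv <= PL1.  Types 4-7 below are gateway pages used for
     * privilege promotion; only the execute permission is computed
     * here, the promotion itself happens when the gateway is entered.
     */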
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
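        /*
         * Each of CR_PID1..CR_PID4 holds a protection id in bits [31:1]
         * and a write-disable flag in bit 0, so MATCH compares equal
         * only when the id matches and writes are disabled for it.
         */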
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;

    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these values is described in
         * section "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                cpu_restore_state(cs, retaddr);

                b = env->gr[env->unwind_breg];
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;

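                /*
                 * cpu_restore_state above has already synchronized the
                 * cpu state with RETADDR (recovering unwind_breg along
                 * the way), so exit the loop without a second restore.
                 */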
                cpu_loop_exit(cs);
            }
        }
    }
    cpu_loop_exit_restore(cs, retaddr);
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

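/*
 * PA 1.1 TLB insertion is a two-step sequence: the insert-address step
 * (IITLBA/IDTLBA) supplies the virtual address and physical page, then
 * the insert-protection step (IITLBP/IDTLBP) supplies the access rights.
 * The half-built entry is parked in env->tlb_partial in between.
 */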
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

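/*
 * PA 2.0 inserts a TLB entry with a single instruction.  R1 carries the
 * physical page number with the page-size field in its low 4 bits (the
 * entry spans TARGET_PAGE_SIZE << 2 * field bytes), R2 carries the
 * protection and attribute bits, and the virtual address comes from the
 * interruption registers (ISR/IOR for data, IIASQ/IIAOQ for insns).
 */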
static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
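    /*
     * If work was queued on other cpus, run our own flush as "safe work"
     * in an exclusive section rather than inline, so that the purge
     * behaves synchronously across all processors.
     */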
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

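/*
 * Flush every non-BTLB entry: clear the array slots, rebuild the unused
 * list, and re-seed the interval tree with only the persistent BTLB
 * entries.
 */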
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
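/*
 * As used here: the option number arrives in gr[25], the option-specific
 * arguments in gr[24]..gr[19], and the PDC status is returned in gr[28].
 */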
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}