/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
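    /*
     * Illustration only (TARGET_PHYS_ADDR_SPACE_BITS is a build-time
     * constant; 40 is assumed here purely for the example): sextract64()
     * keeps bits [39:0] and sign-extends bit 39, so 0x000000f012345678
     * becomes 0xfffffff012345678 -- the I/O region at the top of the
     * absolute space stays at the top of the physical space.
     */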
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
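    /*
     * Resulting 32-bit split, summarized from the code below (not a
     * quote from the figures):
     *   0x00000000-0xefffffff  memory space, zero-extended
     *   0xf1000000-0xffffffff  I/O space, sign-extended to the top of
     *                          the physical address space
     *   0xf0000000-0xf0ffffff  PDC space, placed in the topmost 1/16th
     *                          of the physical address space
     */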
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map it at an offset equal to the 32-bit address, which
         * matches what can be observed on physical machines.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
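    /*
     * BTLB entries occupy the first HPPA_BTLB_ENTRIES(env) slots of
     * env->tlb, so a pointer comparison is enough to identify them.
     */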
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
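        /*
         * The unused list is empty: evict the next dynamic entry in
         * round-robin order.  tlb_last skips the BTLB slots at the front
         * of env->tlb and wraps once it runs off the end of the array.
         */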
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

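/*
 * Translate a virtual address into a physical address and protection bits.
 * Returns -1 on success, or the exception to raise on failure; the matching
 * hppa TLB entry, if any, is returned through TLB_ENTRY.
 */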
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
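    /*
     * For example (purely illustrative): an entry with ar_pl1 = 3 and
     * ar_pl2 = 0 is readable, and for the execute ar_types executable,
     * at every privilege level, but writable only at the most privileged
     * level 0 and only for the write ar_types selected below.
     */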
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
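        /*
         * Note that "match" below is the access id shifted left one bit
         * with bit 0 set, so the exact compare only succeeds when the
         * matching PID register also has its write-disable bit set.
         */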
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;

    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these is described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                cpu_restore_state(cs, retaddr);

                b = env->gr[env->unwind_breg];
                b >>= (env->psw & PSW_W ? 62 : 30);
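                /*
                 * b now holds the top two bits of the base register,
                 * bits [63:62] when PSW_W is set and bits [31:30]
                 * otherwise, i.e. the space bits mentioned above.
                 */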
                env->cr[CR_IOR] |= b << 62;

                cpu_loop_exit(cs);
            }
        }
    }
    cpu_loop_exit_restore(cs, retaddr);
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
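    /* Bits [24:5] of the operand hold the 20-bit physical page number. */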
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;
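    /*
     * Illustration: a low nibble of 2 in r1 gives mask_shift = 4, i.e.
     * a mapping of TARGET_PAGE_SIZE << 4 bytes (64 KiB with 4 KiB base
     * pages).
     */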

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
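    /*
     * The low nibble selects 4^n base pages, the same encoding used by
     * mask_shift in itlbt_pa20() above: 0 flushes one page, 1 flushes 4,
     * 2 flushes 16, and so on.
     */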
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
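    /*
     * If work was queued on other vCPUs, run the local flush as "safe"
     * work too, keeping it ordered with respect to the remote flushes.
     */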
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

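/*
 * Load Physical Address.  Translate ADDR through the kernel data mmu
 * with a zero access type, so that only a missing translation faults;
 * the miss is then reported as its non-access flavor.
 */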
target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
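/*
 * As implemented below, the subfunction is passed in gr[25] (0 = info,
 * 1 = insert, 2 = purge, 3 = purge all), further arguments in gr[19..24],
 * and the PDC status is returned in gr[28].
 */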
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}