/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
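    /*
     * Sketch of the resulting mapping for a 32-bit input (illustrative;
     * the exact I/O and PDC placement depends on
     * TARGET_PHYS_ADDR_SPACE_BITS):
     *   0x00000000-0xefffffff  memory space, zero-extended
     *   0xf1000000-0xffffffff  I/O space, sign-extended toward the top
     *                          of the physical address space
     *   0xf0000000-0xf0ffffff  PDC space, 32-bit offset preserved with
     *                          the high physical address bits forced on
     */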
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map it with an offset equal to the 32-bit address, which
         * matches what can be observed on physical machines.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

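    /* No entry on the free list: evict round-robin among non-BTLB slots. */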
    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
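/*
 * Note the layout implied below: bit 0 of a protection id word is the
 * write-disable (WD) bit, and the id proper sits in bits [16:1], which
 * is what is compared against the TLB entry's access id.
 */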
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

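/* For PA2.0, each 64-bit PID register holds two 32-bit protection ids. */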
static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, MemOp mop, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress_align;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
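    /* For example, a normal code page (ar_type 2) with ar_pl1 = 3 and
       ar_pl2 = 0 is readable and executable at every privilege level. */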
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. the WD bit). */
        prot &= access_prot;
    }

    /*
     * In reverse priority order, check for conditions which raise faults.
     * Remove PROT bits that cover the condition we want to check,
     * so that the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(ent->t)) {
        prot &= PAGE_EXEC;
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
    }
    if (unlikely(!ent->d)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
    }
    if (unlikely(ent->b)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /*
             * The B bit is set -- Data Memory Break Fault.
             * Except when PSW_X is set, allow this single access to succeed.
             * The write bit will be invalidated for subsequent accesses.
             */
            if (env->psw_xb & PSW_X) {
                prot |= PAGE_WRITE_INV;
            } else {
                ret = EXCP_DMB;
            }
        }
    }

 egress_align:
    if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
        ret = EXCP_UNALIGN;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these is described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                " while accessing I/O at %#08" HWADDR_PRIx "\n",
                env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                             MMUAccessType type, int mmu_idx,
                             MemOp memop, int size, bool probe, uintptr_t ra)
{
    CPUHPPAState *env = cpu_env(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
                                     &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, ra, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    memset(out, 0, sizeof(*out));
    out->phys_addr = phys;
    out->prot = prot;
    out->attrs = MEMTXATTRS_UNSPECIFIED;
    out->lg_page_size = TARGET_PAGE_BITS;

    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

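    /* The low 4 bits of r1 encode the page size as 4**N * TARGET_PAGE_SIZE. */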
    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

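/*
 * For the PA2.0 inserts, the target virtual address is reconstructed
 * from the interruption registers: space bits from ISR (data) or IIASQ
 * (insn), offset bits from IOR or IIAOQ respectively.
 */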
void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
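    /* For example, an encoded value of 3 covers 4**3 = 64 pages. */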
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
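    /* If other CPUs were involved, run the local flush as "safe" work
       so that it serializes with the asynchronous flushes queued above. */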
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }
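    /* The final entry's unused_next remains NULL from the memset above. */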

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
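            /* Info block: presumably the min and max block sizes (in
               pages) and the fixed/variable BTLB entry counts, per the
               PDC_BLOCK_TLB documentation referenced above. */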
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}

uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
    uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
    HPPATLBEntry *ent = hppa_find_tlb(env, gva);

    if (ent == NULL) {
        raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
    }

    /*
     * There should be no need to check page permissions, as that will
     * already have been done by tb_lookup via get_page_addr_code.
     * All we need at this point is to check the ar_type.
     *
     * No change for non-gateway pages or for priv decrease.
     */
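    /* Gateway pages have ar_type 4..7; the low two bits give the target
       privilege level, where 0 is the most privileged. */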
    if (ent->ar_type & 4) {
        int old_priv = iaoq_f & 3;
        int new_priv = ent->ar_type & 3;

        if (new_priv < old_priv) {
            iaoq_f = (iaoq_f & -4) | new_priv;
        }
    }
    return iaoq_f;
}