Lines Matching +full:bypass +full:slot +full:no

23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
25 #include "exec/helper-proto.h"
32 * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes in hppa_abs_to_phys_pa2_w1()
33 * an algorithm in which a 62-bit absolute address is transformed to in hppa_abs_to_phys_pa2_w1()
34 * a 64-bit physical address. This must then be combined with that in hppa_abs_to_phys_pa2_w1()
35 * pictured in Figure H-11 "Physical Address Space Mapping", in which in hppa_abs_to_phys_pa2_w1()
36 * the full physical address is truncated to the N-bit physical address in hppa_abs_to_phys_pa2_w1()
40 * H-8 algorithm is moot and all that is left is to truncate. in hppa_abs_to_phys_pa2_w1()
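
Taken together, the two figures reduce to a single mask: with the PSW W-bit set, the H-8 widening step contributes nothing, so only the Figure H-11 truncation to the implementation's N-bit physical space remains. A minimal standalone sketch of that reduction, with phys_bits standing in for TARGET_PHYS_ADDR_SPACE_BITS and the function name illustrative:

    #include <stdint.h>

    /* Truncate a 64-bit absolute address to an N-bit physical address,
     * per Figure H-11; valid for phys_bits < 64. */
    static uint64_t abs_to_phys_w1_sketch(uint64_t addr, unsigned phys_bits)
    {
        return addr & ((UINT64_C(1) << phys_bits) - 1);
    }
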
49 * See Figure H-10, "Absolute Accesses when PSW W-bit is 0", in hppa_abs_to_phys_pa2_w0()
50 * combined with Figure H-11, as above. in hppa_abs_to_phys_pa2_w0()
61 * Figures H-10 and H-11 of the parisc2.0 spec do not specify in hppa_abs_to_phys_pa2_w0()
62 * where to map into the 64-bit PDC address space. in hppa_abs_to_phys_pa2_w0()
63 * We map with an offset which equals the 32-bit address, which in hppa_abs_to_phys_pa2_w0()
67 addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4); in hppa_abs_to_phys_pa2_w0()
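
The OR above sets the top four bits of the N-bit physical space, i.e. it places the 32-bit PDC-range address in the 0xf... region of Figure H-11 while the 32-bit value itself supplies the offset. A hedged sketch of the same computation, again with illustrative names:

    #include <stdint.h>

    /* Map a 32-bit absolute PDC-range address into the top sixteenth
     * of an N-bit physical space, keeping the 32-bit value as the
     * offset within that region. */
    static uint64_t map_pdc_w0_sketch(uint32_t addr32, unsigned phys_bits)
    {
        uint64_t addr = addr32;

        addr |= -(UINT64_C(1)) << (phys_bits - 4);      /* top 4 bits */
        return addr & ((UINT64_C(1) << phys_bits) - 1); /* truncate to N */
    }
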
74 IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr); in hppa_find_tlb()
78 trace_hppa_tlb_find_entry(env, ent, ent->entry_valid, in hppa_find_tlb()
79 ent->itree.start, ent->itree.last, ent->pa); in hppa_find_tlb()
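
The lookup queries the interval tree with the degenerate range [addr, addr] and converts the returned node back into its embedding entry. A sketch of that pattern, assuming QEMU's interval-tree API from include/qemu/interval-tree.h and the HPPATLBEntry fields visible above:

    /* Find the TLB entry covering addr, if any. */
    static HPPATLBEntry *find_tlb_sketch(CPUHPPAState *env, vaddr addr)
    {
        IntervalTreeNode *i =
            interval_tree_iter_first(&env->tlb_root, addr, addr);

        return i ? container_of(i, HPPATLBEntry, itree) : NULL;
    }
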
92 if (!ent->entry_valid) { in hppa_flush_tlb_ent()
96 trace_hppa_tlb_flush_ent(env, ent, ent->itree.start, in hppa_flush_tlb_ent()
97 ent->itree.last, ent->pa); in hppa_flush_tlb_ent()
99 tlb_flush_range_by_mmuidx(cs, ent->itree.start, in hppa_flush_tlb_ent()
100 ent->itree.last - ent->itree.start + 1, in hppa_flush_tlb_ent()
104 is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)]; in hppa_flush_tlb_ent()
109 interval_tree_remove(&ent->itree, &env->tlb_root); in hppa_flush_tlb_ent()
113 ent->unused_next = env->tlb_unused; in hppa_flush_tlb_ent()
114 env->tlb_unused = ent; in hppa_flush_tlb_ent()
122 i = interval_tree_iter_first(&env->tlb_root, va_b, va_e); in hppa_flush_tlb_range()
137 HPPATLBEntry *ent = env->tlb_unused; in hppa_alloc_tlb_ent()
141 uint32_t i = env->tlb_last; in hppa_alloc_tlb_ent()
143 if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) { in hppa_alloc_tlb_ent()
146 env->tlb_last = i + 1; in hppa_alloc_tlb_ent()
148 ent = &env->tlb[i]; in hppa_alloc_tlb_ent()
152 env->tlb_unused = ent->unused_next; in hppa_alloc_tlb_ent()
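
The fragments above outline the allocation policy: pop the unused list when it is non-empty, otherwise evict round-robin among the slots past the fixed BTLB range. A sketch stitching those pieces together; the flush call's third argument and the exact ordering are assumptions, not a quote of the function:

    static HPPATLBEntry *alloc_tlb_sketch(CPUHPPAState *env)
    {
        HPPATLBEntry *ent = env->tlb_unused;

        if (ent == NULL) {
            uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
            uint32_t i = env->tlb_last;

            /* Wrap the round-robin cursor past the fixed BTLB slots. */
            if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
                i = btlb_entries;
            }
            env->tlb_last = i + 1;

            ent = &env->tlb[i];
            hppa_flush_tlb_ent(env, ent, false);  /* evict the victim */
        } else {
            env->tlb_unused = ent->unused_next;   /* pop the free list */
        }
        return ent;
    }
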
174 r = match_prot_id_1(access_id, env->cr[i]); in match_prot_id32()
187 r = match_prot_id_1(access_id, env->cr[i]); in match_prot_id64()
191 r = match_prot_id_1(access_id, env->cr[i] >> 32); in match_prot_id64()
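
Both wrappers scan the protection-ID registers and stop at the first definitive answer, the 64-bit variant checking each half of the doubled registers. The per-register predicate match_prot_id_1() is not among the matched lines; the sketch below states the architectural rule it has to implement, with the bit layout and return convention being this sketch's own assumptions:

    /* A PID register holds a protection ID plus a write-disable bit.
     * A page matches when its access_id equals the register's ID; a
     * match then either permits or forbids writes.  The ID-in-upper-
     * bits, WD-in-bit-0 layout here is illustrative only. */
    static int match_pid_sketch(uint32_t access_id, uint32_t prot_id)
    {
        if (access_id != (prot_id >> 1)) {
            return -1;                 /* no match: try the next PID */
        }
        return prot_id & 1
             ? PAGE_READ | PAGE_EXEC               /* write-disabled */
             : PAGE_READ | PAGE_EXEC | PAGE_WRITE;
    }
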
205 int ret = -1; in hppa_get_physical_address()
237 phys = ent->pa + (addr - ent->itree.start); in hppa_get_physical_address()
241 r_prot = (priv <= ent->ar_pl1) * PAGE_READ; in hppa_get_physical_address()
242 w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE; in hppa_get_physical_address()
243 x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC; in hppa_get_physical_address()
244 switch (ent->ar_type) { in hppa_get_physical_address()
245 case 0: /* read-only: data page */ in hppa_get_physical_address()
263 * No guest access type indicates a non-architectural access from in hppa_get_physical_address()
264 * within QEMU. Bypass checks for access, D, B, P and T bits. in hppa_get_physical_address()
271 /* Not allowed -- Inst/Data Memory Access Rights Fault. */ in hppa_get_physical_address()
276 /* access_id == 0 means public page and no check is performed */ in hppa_get_physical_address()
277 if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) { in hppa_get_physical_address()
279 ? match_prot_id64(env, ent->access_id) in hppa_get_physical_address()
280 : match_prot_id32(env, ent->access_id)); in hppa_get_physical_address()
282 /* Not allowed -- Inst/Data Memory Protection Id Fault. */ in hppa_get_physical_address()
293 * so that the resulting PROT will force a re-check of the in hppa_get_physical_address()
296 if (unlikely(ent->t)) { in hppa_get_physical_address()
299 /* The T bit is set -- Page Reference Fault. */ in hppa_get_physical_address()
303 if (unlikely(!ent->d)) { in hppa_get_physical_address()
306 /* The D bit is not set -- TLB Dirty Bit Fault. */ in hppa_get_physical_address()
310 if (unlikely(ent->b)) { in hppa_get_physical_address()
314 * The B bit is set -- Data Memory Break Fault. in hppa_get_physical_address()
318 if (env->psw_xb & PSW_X) { in hppa_get_physical_address()
327 if (addr & ((1u << memop_alignment_bits(mop)) - 1)) { in hppa_get_physical_address()
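
Isolating the rights computation above: read requires priv <= PL1, write requires priv <= PL2, and execute requires PL2 <= priv <= PL1, with 0 the most privileged level; the ar_type switch then masks these maxima down to the page's actual type. A standalone restatement with a worked example:

    /* For ar_pl1 = 3, ar_pl2 = 0: readable and executable at every
     * privilege level, writable only at level 0. */
    static int prot_for_priv_sketch(int priv, int ar_pl1, int ar_pl2)
    {
        return (priv <= ar_pl1) * PAGE_READ
             | (priv <= ar_pl2) * PAGE_WRITE
             | (ar_pl2 <= priv && priv <= ar_pl1) * PAGE_EXEC;
    }
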
344 /* If the (data) mmu is disabled, bypass translation. */ in hppa_cpu_get_phys_page_debug()
347 mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX : in hppa_cpu_get_phys_page_debug()
348 cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX); in hppa_cpu_get_phys_page_debug()
350 excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0, in hppa_cpu_get_phys_page_debug()
354 hard error is no translation at all. Otherwise, while a real cpu in hppa_cpu_get_phys_page_debug()
356 return excp == EXCP_DTLB_MISS ? -1 : phys; in hppa_cpu_get_phys_page_debug()
361 if (env->psw & PSW_Q) { in hppa_set_ior_and_isr()
367 * "Interruption Parameter Registers", page 2-15. in hppa_set_ior_and_isr()
369 env->cr[CR_IOR] = (uint32_t)addr; in hppa_set_ior_and_isr()
370 env->cr[CR_ISR] = addr >> 32; in hppa_set_ior_and_isr()
376 * the upper portion of the abs address, zero-extended. in hppa_set_ior_and_isr()
378 env->cr[CR_ISR] &= 0x3fffffff; in hppa_set_ior_and_isr()
387 b = env->unwind_breg ? env->gr[env->unwind_breg] : 0; in hppa_set_ior_and_isr()
388 b >>= (env->psw & PSW_W ? 62 : 30); in hppa_set_ior_and_isr()
389 env->cr[CR_IOR] |= b << 62; in hppa_set_ior_and_isr()
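
The fragments show the offending address split across ISR:IOR, with the top two bits of the unwound base register folded into IOR<0:1> (bits 62-63 of the register) in the narrow cases. A condensed sketch of those moves; the conditions selecting between the variants are elided and the helper name is illustrative:

    /* Split a 64-bit address into ISR:IOR and fold in the space bits
     * recovered from the unwound base register b. */
    static void set_ior_isr_sketch(uint64_t *ior, uint64_t *isr,
                                   uint64_t addr, uint64_t b, bool wide)
    {
        *ior = (uint32_t)addr;             /* low 32 bits */
        *isr = (addr >> 32) & 0x3fffffff;  /* upper part, zero-extended */
        *ior |= (b >> (wide ? 62 : 30)) << 62;
    }
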
401 cs->exception_index = excp; in raise_exception_with_ior()
418 env->iasq_f, env->iaoq_f, physaddr); in hppa_cpu_do_transaction_failed()
465 * Note that we always install a single-page entry, because that in hppa_cpu_tlb_fill_align()
466 * is what works best with softmmu -- anything else will trigger in hppa_cpu_tlb_fill_align()
471 out->phys_addr = phys; in hppa_cpu_tlb_fill_align()
472 out->prot = prot; in hppa_cpu_tlb_fill_align()
473 out->attrs = MEMTXATTRS_UNSPECIFIED; in hppa_cpu_tlb_fill_align()
474 out->lg_page_size = TARGET_PAGE_BITS; in hppa_cpu_tlb_fill_align()
486 hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1); in HELPER()
488 ent = env->tlb_partial; in HELPER()
491 env->tlb_partial = ent; in HELPER()
494 /* Note that ent->entry_valid == 0 already. */ in HELPER()
495 ent->itree.start = addr; in HELPER()
496 ent->itree.last = addr + TARGET_PAGE_SIZE - 1; in HELPER()
497 ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS; in HELPER()
498 trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa); in HELPER()
504 ent->access_id = extract32(reg, 1, 18); in set_access_bits_pa11()
505 ent->u = extract32(reg, 19, 1); in set_access_bits_pa11()
506 ent->ar_pl2 = extract32(reg, 20, 2); in set_access_bits_pa11()
507 ent->ar_pl1 = extract32(reg, 22, 2); in set_access_bits_pa11()
508 ent->ar_type = extract32(reg, 24, 3); in set_access_bits_pa11()
509 ent->b = extract32(reg, 27, 1); in set_access_bits_pa11()
510 ent->d = extract32(reg, 28, 1); in set_access_bits_pa11()
511 ent->t = extract32(reg, 29, 1); in set_access_bits_pa11()
512 ent->entry_valid = 1; in set_access_bits_pa11()
514 interval_tree_insert(&ent->itree, &env->tlb_root); in set_access_bits_pa11()
515 trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2, in set_access_bits_pa11()
516 ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t); in set_access_bits_pa11()
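
Collected into one map, the extract32() calls above document the PA1.1 insertion word (bit positions exactly as in the calls; 0 is the least significant bit of reg):

    /*
     *   reg<29>     T    page reference trap
     *   reg<28>     D    dirty
     *   reg<27>     B    break
     *   reg<26:24>  AR type
     *   reg<23:22>  AR PL1
     *   reg<21:20>  AR PL2
     *   reg<19>     U
     *   reg<18:1>   access id
     */
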
522 HPPATLBEntry *ent = env->tlb_partial; in HELPER()
525 env->tlb_partial = NULL; in HELPER()
526 if (ent->itree.start <= addr && addr <= ent->itree.last) { in HELPER()
544 va_b &= -va_size; in itlbt_pa20()
545 va_e = va_b + va_size - 1; in itlbt_pa20()
550 ent->itree.start = va_b; in itlbt_pa20()
551 ent->itree.last = va_e; in itlbt_pa20()
554 ent->pa = r1 << (TARGET_PAGE_BITS - 5); in itlbt_pa20()
556 ent->pa &= TARGET_PAGE_MASK << mask_shift; in itlbt_pa20()
558 ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS); in itlbt_pa20()
560 ent->t = extract64(r2, 61, 1); in itlbt_pa20()
561 ent->d = extract64(r2, 60, 1); in itlbt_pa20()
562 ent->b = extract64(r2, 59, 1); in itlbt_pa20()
563 ent->ar_type = extract64(r2, 56, 3); in itlbt_pa20()
564 ent->ar_pl1 = extract64(r2, 54, 2); in itlbt_pa20()
565 ent->ar_pl2 = extract64(r2, 52, 2); in itlbt_pa20()
566 ent->u = extract64(r2, 51, 1); in itlbt_pa20()
569 ent->access_id = extract64(r2, 1, 31); in itlbt_pa20()
570 ent->entry_valid = 1; in itlbt_pa20()
572 interval_tree_insert(&ent->itree, &env->tlb_root); in itlbt_pa20()
573 trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa); in itlbt_pa20()
574 trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, in itlbt_pa20()
575 ent->ar_pl2, ent->ar_pl1, ent->ar_type, in itlbt_pa20()
576 ent->b, ent->d, ent->t); in itlbt_pa20()
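
The PA2.0 insert word r2 decoded above, collected the same way (bit positions exactly as in the extract64() calls):

    /*
     *   r2<61>     T           r2<55:54>  AR PL1
     *   r2<60>     D           r2<53:52>  AR PL2
     *   r2<59>     B           r2<51>     U
     *   r2<58:56>  AR type     r2<31:1>   access id
     */
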
581 vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]); in HELPER()
587 vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]); in HELPER()
599 * copied into the bottom bits of the otherwise page-aligned address. in ptlb_work()
605 end = start + end - 1; in ptlb_work()
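
The comment above says a range code rides in the low bits of the otherwise page-aligned purge address, and the end = start + end - 1 step turns the decoded byte count into an inclusive bound. A sketch of the decoding; the 4-bit field and the shift-by-2*code rule are assumptions modeled on the PA2.0 range encoding, not a quote of ptlb_work():

    /* Decode a purge address whose low bits carry a size code into an
     * inclusive [start, end] range. */
    static void ptlb_range_sketch(uint64_t addr,
                                  uint64_t *start, uint64_t *end)
    {
        unsigned code = addr & 0xf;        /* size code in low bits */

        *start = addr & TARGET_PAGE_MASK;
        *end = *start + ((uint64_t)TARGET_PAGE_SIZE << (2 * code)) - 1;
    }
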
645 /* Zap the (non-btlb) tlb entries themselves. */ in hppa_ptlbe()
646 memset(&env->tlb[btlb_entries], 0, in hppa_ptlbe()
647 sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0])); in hppa_ptlbe()
648 env->tlb_last = btlb_entries; in hppa_ptlbe()
649 env->tlb_partial = NULL; in hppa_ptlbe()
652 env->tlb_unused = &env->tlb[btlb_entries]; in hppa_ptlbe()
653 for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) { in hppa_ptlbe()
654 env->tlb[i].unused_next = &env->tlb[i + 1]; in hppa_ptlbe()
657 /* Re-initialize the interval tree with only the btlb entries. */ in hppa_ptlbe()
658 memset(&env->tlb_root, 0, sizeof(env->tlb_root)); in hppa_ptlbe()
660 if (env->tlb[i].entry_valid) { in hppa_ptlbe()
661 interval_tree_insert(&env->tlb[i].itree, &env->tlb_root); in hppa_ptlbe()
668 /* Purge (Insn/Data) TLB entry. This affects an implementation-defined
708 * For implementation details see page 1-13 in
709 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
713 unsigned int phys_page, len, slot; in HELPER() local
721 /* BTLBs are not supported on 64-bit CPUs */ in HELPER()
723 env->gr[28] = -1; /* nonexistent procedure */ in HELPER()
727 env->gr[28] = 0; /* PDC_OK */ in HELPER()
729 switch (env->gr[25]) { in HELPER()
733 vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t), in HELPER()
736 env->gr[28] = -10; /* invalid argument */ in HELPER()
746 virt_page = env->gr[24]; /* upper 32 bits */ in HELPER()
748 virt_page |= env->gr[23]; /* lower 32 bits */ in HELPER()
749 phys_page = env->gr[22]; in HELPER()
750 len = env->gr[21]; in HELPER()
751 slot = env->gr[19]; in HELPER()
753 "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d " in HELPER()
754 "into slot %d\n", in HELPER()
757 (long long) virt_page, phys_page, len, slot); in HELPER()
758 if (slot < btlb_entries) { in HELPER()
759 btlb = &env->tlb[slot]; in HELPER()
765 btlb->itree.start = virt_page << TARGET_PAGE_BITS; in HELPER()
766 btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1; in HELPER()
767 btlb->pa = phys_page << TARGET_PAGE_BITS; in HELPER()
768 set_access_bits_pa11(env, btlb, env->gr[20]); in HELPER()
769 btlb->t = 0; in HELPER()
770 btlb->d = 1; in HELPER()
772 env->gr[28] = -10; /* invalid argument */ in HELPER()
777 slot = env->gr[22]; in HELPER()
778 qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n", in HELPER()
779 slot); in HELPER()
780 if (slot < btlb_entries) { in HELPER()
781 btlb = &env->tlb[slot]; in HELPER()
784 env->gr[28] = -10; /* invalid argument */ in HELPER()
790 for (slot = 0; slot < btlb_entries; slot++) { in HELPER()
791 btlb = &env->tlb[slot]; in HELPER()
796 env->gr[28] = -2; /* nonexistent option */ in HELPER()
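
For reference, the register convention this PDC_BLOCK_TLB helper implements, as visible in the fragments above:

    /*
     *   gr[25]      option (insert / purge / purge-all)
     *   gr[24]      virtual page, upper 32 bits        (insert)
     *   gr[23]      virtual page, lower 32 bits        (insert)
     *   gr[22]      physical page (insert); slot (purge)
     *   gr[21]      length in pages                    (insert)
     *   gr[20]      PA1.1 access-rights word           (insert)
     *   gr[19]      slot number                        (insert)
     *   gr[28] out  0 = PDC_OK, -1 = nonexistent procedure,
     *               -2 = nonexistent option, -10 = invalid argument
     */
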
803 uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f); in HELPER()
811 * There should be no need to check page permissions, as that will in HELPER()
815 * No change for non-gateway pages or for priv decrease. in HELPER()
817 if (ent->ar_type & 4) { in HELPER()
819 int new_priv = ent->ar_type & 3; in HELPER()
822 iaoq_f = (iaoq_f & -4) | new_priv; in HELPER()
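
A worked example of the promotion: ar_type = 5 (that is, 4 | 1) marks a gateway page granting privilege level 1, so entering with iaoq_f = 0x1000 at privilege 3 gives (0x1000 & -4) | 1 = 0x1001; since lower numbers are more privileged, that is an increase, while a gateway whose level would lower the current privilege leaves it unchanged, per the comment above.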