/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record for a translation register (TR) entry */

/*
 * ia64_itr_entry() pins a mapping in a translation register and returns
 * the slot used (or a negative error); ia64_ptr_entry() purges that slot.
 */
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Region register macros.
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */
extern void setup_ptcg_sem(int max_purges, int from_palo);

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
# define smp_flush_tlb_cpumask(m)	local_flush_tlb_all()
#endif

static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of
 * fork() and exit().  fork() ends up here because the copy-on-write
 * mechanism needs to write-protect the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}

extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular TLB flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		vma->vm_mm->context = 0;
#endif
}

/*
 * Flush the local TLB.  Invoked from another cpu using an IPI.
 */
#ifdef CONFIG_SMP
void smp_local_flush_tlb(void);
#else
#define smp_local_flush_tlb()
#endif

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();	/* XXX fix me */
}

#endif /* _ASM_IA64_TLBFLUSH_H */
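
/*
 * Illustrative sketch (not part of this header's API, compiled out via
 * #if 0): how the RR_* accessors above compose and decompose a region
 * register value.  The region ID 0x1234 is an arbitrary example.
 */
#if 0
static void rr_macro_example(void)
{
	/* Compose: VHPT enabled, preferred page size, 24-bit region ID. */
	u64 rr = RR_VE(1) | RR_PS(PAGE_SHIFT) | (0x1234UL << 8);

	BUG_ON(RR_TO_VE(rr) != 1);		/* bit 0 */
	BUG_ON(RR_TO_PS(rr) != PAGE_SHIFT);	/* bits 2..7 */
	BUG_ON(RR_TO_RID(rr) != 0x1234);	/* bits 8..31 (RR_RID_MASK) */
}
#endif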
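
/*
 * Illustrative sketch of the translation-register API declared above.
 * It assumes target_mask bit 1 selects the data TRs (bit 0 the
 * instruction TRs) and that ia64_itr_entry() returns the allocated slot
 * or a negative error; va and pte here are caller-supplied placeholders.
 */
#if 0
static int tr_pin_example(u64 va, u64 pte)
{
	int slot = ia64_itr_entry(0x2, va, pte, PAGE_SHIFT);

	if (slot < 0)
		return slot;		/* no free TR slot */

	/* ... use the pinned mapping; ptc purges do not evict TRs ... */

	ia64_ptr_entry(0x2, slot);	/* drop the pin again */
	return 0;
}
#endif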
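
/*
 * Illustrative sketch of purging a single user translation after one
 * PTE changes, e.g. when write-protecting a page for copy-on-write.
 * The helper is hypothetical; fork() itself batches this work across
 * the whole address space and ends in flush_tlb_mm(), as noted above.
 */
#if 0
static void cow_protect_example(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(vma->vm_mm, addr, ptep);	/* write-protect the PTE */
	flush_tlb_page(vma, addr);			/* purge the stale entry */
}
#endif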
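
/*
 * Illustrative sketch: after kernel page-table updates (e.g. tearing
 * down a vmalloc'd mapping) the affected range must be flushed.  On
 * ia64, flush_tlb_kernel_range() currently falls back to a full flush,
 * per the XXX above.
 */
#if 0
static void kernel_unmap_example(void *vaddr, unsigned long size)
{
	unsigned long start = (unsigned long) vaddr;

	/* ... kernel PTEs for [start, start + size) cleared here ... */
	flush_tlb_kernel_range(start, start + size);
}
#endif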