#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_mm(mm, full) flushes the specified mm context on
 *                           the local processor
 *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

/* Forward declarations: a pointer is all this header needs. */
struct vm_area_struct;
struct mm_struct;

/* Sentinel meaning "no MMU context currently assigned" on SW-loaded TLBs. */
#define MMU_NO_CONTEXT	((unsigned int)-1)

extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

/* Local (current CPU only) variants, implemented out of line. */
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

/* Low-level single-page flush taking an explicit page size and ind(irect) flag. */
extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
				   int tsize, int ind);

#ifdef CONFIG_SMP
/* SMP: cross-CPU flush variants, implemented out of line. */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			     int tsize, int ind);
#else
/* UP: the global flushes degenerate to the local ones. */
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)

#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
#define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
#endif
/* On SW-loaded TLBs the "nohash" page flush is just the normal page flush. */
#define flush_tlb_page_nohash(vma,addr)	flush_tlb_page(vma,addr)

#elif defined(CONFIG_PPC_STD_MMU_32)

/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

/* 32-bit hash MMU: the local variants simply call the global ones. */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

#elif defined(CONFIG_PPC_STD_MMU_64)

/* Note: differs from the nohash value ((unsigned int)-1) above. */
#define MMU_NO_CONTEXT	0

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

/* Max number of PTE invalidations queued per CPU before a forced flush. */
#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of pending hash-table invalidations, filled while in
 * lazy MMU mode and drained by __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int			active;	/* nonzero while batching (lazy MMU mode) */
	unsigned long		index;	/* number of entries currently queued */
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;	/* segment size */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Flush all entries queued in @batch; implemented out of line. */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

/* Start batching PTE invalidations on this CPU. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

/* Stop batching: drain anything queued, then mark the batch inactive. */
static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)


extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/*
 * The generic flush_tlb_* entry points are no-ops on the 64-bit hash MMU:
 * invalidation is presumably handled through the hash-page-table paths
 * (flush_hash_page/flush_hash_range and the lazy-MMU batch above) rather
 * than by these hooks -- NOTE(review): confirm against the out-of-line
 * hash MMU implementation.
 */

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
/* Flush hash entries covering a huge-page PMD; implemented out of line. */
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#else
#error Unsupported MMU type
#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */