#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_mm(mm, full) flushes the specified mm context on
 *                           the local processor
 *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

/* Forward declarations only; a pointer is all these prototypes need. */
struct vm_area_struct;
struct mm_struct;

/* Sentinel meaning "no MMU context currently assigned" for SW-loaded TLBs. */
#define MMU_NO_CONTEXT      	((unsigned int)-1)

extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

/*
 * Low-level single-page flush taking an explicit translation size (tsize)
 * and indirect-entry flag (ind); used by the higher-level helpers above.
 */
extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
				   int tsize, int ind);

#ifdef CONFIG_SMP
/* SMP: the global flush variants are real functions (cross-CPU flush). */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			     int tsize, int ind);
#else
/* UP: a "global" flush is just a local flush on the one CPU. */
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
#define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
#endif
#define flush_tlb_page_nohash(vma,addr)	flush_tlb_page(vma,addr)

#elif defined(CONFIG_PPC_STD_MMU_32)

/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
/*
 * On 32-bit hash MMUs the local_ variants simply forward to the global
 * flushes; there is no cheaper CPU-local path here.
 */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

#elif defined(CONFIG_PPC_STD_MMU_64)

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

/* Capacity of the per-CPU batch of pending hash-PTE invalidations. */
#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of hash-page-table entries awaiting invalidation.
 * Entries are accumulated while "active" and flushed in one go by
 * __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int			active;		/* batching currently enabled */
	unsigned long		index;		/* number of queued entries */
	struct mm_struct	*mm;		/* mm all queued entries belong to */
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;		/* page size of queued entries */
	int			ssize;		/* segment size of queued entries */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Flush all entries queued in @batch and reset its index. */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

/* Called at PTE update time to queue (or perform) the needed hash flush. */
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

/* Start batching: subsequent hash flushes accumulate in the per-CPU batch. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

/* Stop batching: push out anything still queued, then mark inactive. */
static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/*
 * The generic flush_tlb_* entry points are no-ops on 64-bit hash MMUs:
 * invalidation is driven from PTE updates (see hpte_need_flush above)
 * rather than from these calls.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);

#else
#error Unsupported MMU type
#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */