#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

/* Low-level invalidation primitives, implemented in assembly:
 * _tlbie     — invalidate one (address, pid) translation
 * _tlbil_all — invalidate the entire TLB
 * _tlbil_pid — invalidate all entries tagged with the given PID
 * _tlbil_va  — invalidate the entry for one virtual address in a PID
 */
extern void _tlbie(unsigned long address, unsigned int pid);
extern void _tlbil_all(void);
extern void _tlbil_pid(unsigned int pid);
extern void _tlbil_va(unsigned long address, unsigned int pid);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
/* 40x/8xx have a real tlbia instruction; sync orders the flush. */
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
/* No usable tlbia on these cores; the loop lives in assembly. */
extern void _tlbia(void);
#endif

/* Flush every TLB entry belonging to this mm's context (PID). */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbil_pid(mm->context.id);
}

/* Flush the TLB entry for one user page.  A NULL vma falls back to
 * PID 0 (the kernel context). */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

/* On software-loaded-TLB chips there is no hash table, so the
 * "nohash" variant is just a plain page flush. */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}

/* Range flush is implemented as a whole-PID flush rather than
 * iterating per page (see TODO above re: tlbia vs tlbie). */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbil_pid(vma->vm_mm->context.id);
}

/* Kernel mappings are tagged with PID 0, so flush that context. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbil_pid(0);
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of pending hash-page invalidations.  Up to
 * PPC64_TLB_BATCH_NR (pte, vaddr) pairs are accumulated while
 * "active" and pushed out in one go by __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int			active;		/* batching enabled (lazy MMU mode) */
	unsigned long		index;		/* number of entries queued */
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;		/* page size of the batch */
	int			ssize;		/* segment size of the batch */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Flush everything queued in a per-CPU batch. */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

/* Queue (or immediately perform) the hash invalidation for a PTE
 * that is being changed. */
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

/* Start batching hash invalidations on this CPU. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

/* Stop batching: flush anything still queued, then mark inactive. */
static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/* The generic flush_tlb_* hooks are no-ops here: invalidation on
 * 64-bit hash CPUs goes through the hash-flush interfaces above
 * (hpte_need_flush / flush_hash_page / flush_hash_range). */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);


#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */