#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

/* Invalidate the TLB entry for one virtual address in the given PID's
 * context (implemented out of line, in assembly). */
extern void _tlbie(unsigned long address, unsigned int pid);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
/* These cores have a tlbia instruction: whole-TLB flush is a one-liner. */
#define _tlbia() asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
/* No tlbia instruction here: whole-TLB flush is an out-of-line routine. */
extern void _tlbia(void);
#endif

/*
 * Flush all TLB entries for the given mm.  There is no per-context
 * selective flush on these parts, so the whole TLB is invalidated.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbia();
}

/*
 * Flush the TLB entry for a single user page.  A NULL vma is treated
 * as a kernel-context flush (PID 0).
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

/*
 * Identical to flush_tlb_page() on these chips: the TLB is software
 * loaded, so there is no separate hash-table path to avoid.
 */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

/*
 * Flush a user address range.  Currently a whole-TLB flush; see the
 * TODO above about whether per-page tlbie's would be better on
 * FSL_BOOKE.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbia();
}

/* Flush a kernel address range — likewise a whole-TLB flush. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbia();
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 * (all implemented out of line).
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

/*
 * Per-cpu batch of hash-PTE invalidations queued while in lazy MMU
 * mode, flushed via __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int active;		/* batching currently enabled? */
	unsigned long index;	/* number of queued entries */
	struct mm_struct *mm;	/* mm the queued entries belong to */
	real_pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
	unsigned int psize;	/* page size of the queued entries */
	int ssize;		/* segment size of the queued entries */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Flush (and empty) the given batch of pending invalidations. */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

/* Enter lazy MMU mode: start batching hash-PTE flushes on this cpu. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

/*
 * Leave lazy MMU mode: push out anything still queued in this cpu's
 * batch, then stop batching.
 */
static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode() do {} while (0)


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/*
 * The generic flush_tlb_* hooks are no-ops on 64-bit hash-MMU:
 * invalidation is driven from the hash-table update paths instead
 * (hpte_need_flush() and the per-cpu batch above) — presumably every
 * relevant PTE change goes through those paths; confirm against the
 * hash-MMU code before relying on this.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);


#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */