/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_all_mm(mm);
	return hash__local_flush_all_mm(mm);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_all_mm(mm);
	return hash__flush_all_mm(mm);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

/*
 * flush the page walk cache for the address
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. We already
	 * have marked the upper/higher level page table entry none by now.
	 * So it is safe to flush PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
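
/*
 * Usage sketch (illustrative only, not part of the header proper): core mm
 * code invalidates a range of user mappings after changing PTEs with
 *
 *	flush_tlb_range(vma, start, end);
 *
 * and a range of kernel-space mappings with
 *
 *	flush_tlb_kernel_range(start, end);
 *
 * Each wrapper above picks its backend at runtime: radix_enabled() is true
 * when the Radix MMU is in use (POWER9 and later), selecting the radix__*
 * implementations; otherwise the hash page table (hash__*) flavours run.
 * On !CONFIG_SMP builds the flush_* entry points simply alias their
 * local_flush_* counterparts, since no other CPUs need to be notified.
 */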