#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

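/*
 * The helpers below dispatch at runtime to either the radix or the
 * hash MMU implementation, depending on radix_enabled().
 *
 * flush_tlb_range() invalidates the translations for the given
 * virtual address range of @vma's address space.
 */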
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

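/* Flush kernel-space translations for the given virtual address range. */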
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

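/*
 * The local_* variants only invalidate translations on the calling CPU;
 * they do not broadcast the invalidation to other CPUs.
 */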
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

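/*
 * Flush the TLB entry for a single page on paths (such as PTE access
 * flag updates) where radix needs a real flush but the hash MMU does
 * not necessarily require one.
 */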
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}

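/* Called by the generic mmu_gather code when a TLB batch is flushed. */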
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

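/*
 * On SMP the flushes below must invalidate translations on all CPUs;
 * on UP they simply fall back to the local variants.
 */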
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

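/* Flush the TLB entry for a single page of @vma on all CPUs. */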
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

/*
 * Flush the page walk cache for the address.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. The
	 * upper/higher-level page table entry has already been marked
	 * none by this point, so it is safe to flush the PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */