/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

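/*
 * While a CPU is in lazy MMU mode, hash PTE invalidations are not done
 * one by one: up to PPC64_TLB_BATCH_NR (vpn, pte) pairs are queued in
 * this per-CPU batch and flushed together by __flush_tlb_pending().
 */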
struct ppc64_tlb_batch {
	int			active;		/* in lazy MMU mode? */
	unsigned long		index;		/* entries queued so far */
	struct mm_struct	*mm;		/* mm the entries belong to */
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];
	unsigned int		psize;		/* base page size */
	int			ssize;		/* segment size */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

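/*
 * Entering lazy MMU mode starts batching invalidations in the per-CPU
 * ppc64_tlb_batch; leaving drains whatever is still queued. Preemption
 * stays disabled in between so the batch cannot migrate to another CPU.
 * Radix does no batching here, so both hooks are no-ops in that case.
 */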
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	/*
	 * apply_to_page_range can call us with preempt enabled when
	 * operating on kernel page tables.
	 */
	preempt_disable();
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
	preempt_enable();
}

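/*
 * Usage sketch (illustrative, not a verbatim in-tree caller): generic
 * mm code such as apply_to_page_range() brackets a run of PTE updates
 * with the hooks above so the resulting hash invalidations are batched:
 *
 *	arch_enter_lazy_mmu_mode();
 *	... PTE updates queue their invalidations ...
 *	arch_leave_lazy_mmu_mode();	<- drains any pending batch
 */
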
#define arch_flush_lazy_mmu_mode()      do {} while (0)

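/*
 * Flush the local CPU's entire TLB using tlbiel; @action selects the
 * invalidation scope.
 */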
extern void hash__tlbiel_all(unsigned int action);

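/*
 * Invalidate hash page table entries (and the matching TLB entries):
 * flush_hash_page() kills a single translation, flush_hash_range()
 * flushes @number entries from the current CPU's batch (@local selects
 * local vs. global invalidation), and flush_hash_hugepage() handles
 * the entries backing a hugepage mapping.
 */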
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);
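
/*
 * On hash, translations are invalidated when the hash PTE itself is
 * updated or evicted (via the batching above), so the generic mm, page
 * and range flush hooks below have nothing left to do.
 */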
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					  unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
					    unsigned long end)
{
}

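/* tlb_flush() hook for the generic mmu_gather machinery. */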
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);

#ifdef CONFIG_PPC_64S_HASH_MMU
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
#else
static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
#endif
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */