/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

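/*
 * Per-CPU batch of pending hash PTE invalidations: up to PPC64_TLB_BATCH_NR
 * (vpn, real pte) pairs for a single mm, page size and segment size, flushed
 * together by __flush_tlb_pending() instead of one HPTE at a time.
 */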
struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

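/* Flush every entry currently queued in @batch and reset its index. */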
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)
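
/*
 * Rough usage sketch: the batch itself is filled by the hash PTE update
 * path (see hpte_need_flush()), not directly by callers of these helpers.
 *
 *	arch_enter_lazy_mmu_mode();	// batch->active = 1
 *	...				// PTE updates queue invalidations
 *	arch_leave_lazy_mmu_mode();	// flush anything still pending
 */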
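/*
 * Low-level hash flush primitives: flush_hash_page() invalidates the HPTE(s)
 * backing one virtual page, flush_hash_range() processes the first @number
 * entries of this CPU's batch, and flush_hash_hugepage() handles hugepage
 * (THP) mappings.
 */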
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}

struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */