xref: /openbmc/linux/arch/m68k/include/asm/tlbflush.h (revision b34e08d5)
1 #ifndef _M68K_TLBFLUSH_H
2 #define _M68K_TLBFLUSH_H
3 
4 #ifdef CONFIG_MMU
5 #ifndef CONFIG_SUN3
6 
7 #include <asm/current.h>
8 #include <asm/mcfmmu.h>
9 
/*
 * Flush the ATC/TLB entry mapping a single kernel page.
 *
 * ColdFire has no single-entry flush used here, so the whole TLB is
 * invalidated instead (MMUOR_CNL).  On '040/'060 the address-space
 * seen by pflush depends on the current segment setting, hence the
 * temporary switch to KERNEL_DS around the instruction.  On '020/'030
 * the "pflush #4,#4,(addr)" form is used; other CPU types fall
 * through with no action.
 */
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		/* No per-page flush: clear all TLB entries. */
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		/* Switch to KERNEL_DS so pflush targets kernel space. */
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
25 
/*
 * Flush all user-space atc entries.
 *
 * ColdFire clears the whole TLB (MMUOR_CNL); '040/'060 use pflushan
 * (flush non-global/user entries); '020/'030 use "pflush #0,#4".
 * Kernel entries are left alone where the hardware allows it —
 * compare flush_tlb_all() below, which uses pflusha.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #0,#4");
	}
}
41 
/*
 * Flush one ATC entry on '040/'060 using pflush on the given address.
 * Unlike flush_tlb_kernel_page(), no set_fs() switch is done here —
 * the caller is responsible for the address-space context.
 */
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}
49 
/*
 * Flush the ATC/TLB entry for a single (user) address, dispatching on
 * CPU type.  ColdFire again falls back to clearing the whole TLB.
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}
59 
#define flush_tlb() __flush_tlb()

/*
 * Flush all atc entries (both kernel and user-space entries).
 *
 * '040/'060 use pflusha (flush everything) rather than the pflushan
 * used by __flush_tlb(); '020/'030 similarly use the unqualified
 * pflusha form.
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}
77 
78 static inline void flush_tlb_mm(struct mm_struct *mm)
79 {
80 	if (mm == current->active_mm)
81 		__flush_tlb();
82 }
83 
/*
 * Flush the ATC entry for one user page in the given VMA's address
 * space.  The set_fs(USER_DS)/set_fs(old_fs) bracket makes the
 * underlying pflush act on user space (see flush_tlb_kernel_page()
 * for the kernel-space counterpart).  A non-active mm needs no flush.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		mm_segment_t old_fs = get_fs();
		set_fs(USER_DS);
		__flush_tlb_one(addr);
		set_fs(old_fs);
	}
}
93 
94 static inline void flush_tlb_range(struct vm_area_struct *vma,
95 				   unsigned long start, unsigned long end)
96 {
97 	if (vma->vm_mm == current->active_mm)
98 		__flush_tlb();
99 }
100 
/*
 * Flush a range of kernel pages.  No per-range primitive exists, so
 * the whole ATC (kernel and user) is flushed; start/end are ignored.
 */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}
105 
106 #else
107 
108 
/*
 * Sun-3 PMEG (page-map entry group) bookkeeping tables, defined
 * elsewhere in the sun3 MMU code.
 */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];	/* reserved PMEGs (not for user allocation) */
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];	/* virtual address each PMEG maps — cleared on flush below */
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];	/* 1 when the PMEG is allocated (see flush loops) */
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];	/* context that owns each PMEG */
114 
/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
       unsigned long addr;
       unsigned char ctx, oldctx;

       /*
        * Invalidate every user segment mapping in every hardware
        * context (8 contexts — presumably the Sun-3 context count;
        * TODO confirm), restoring the original context afterwards.
        */
       oldctx = sun3_get_context();
       for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
	       for(ctx = 0; ctx < 8; ctx++) {
		       sun3_put_context(ctx);
		       sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	       }
       }

       sun3_put_context(oldctx);
       /* erase all of the userspace pmeg maps, we've clobbered them
	  all anyway */
       for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
	       if(pmeg_alloc[addr] == 1) {
		       pmeg_alloc[addr] = 0;
		       pmeg_ctx[addr] = 0;
		       pmeg_vaddr[addr] = 0;
	       }
       }

}
142 
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm (struct mm_struct *mm)
{
     unsigned char oldctx;
     unsigned char seg;
     unsigned long i;

     /* Switch to the mm's hardware context for the segmap accesses. */
     oldctx = sun3_get_context();
     sun3_put_context(mm->context);

     /*
      * Walk the user address range one PMEG at a time; invalidate each
      * live mapping and release its bookkeeping entry.
      */
     for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
	     seg = sun3_get_segmap(i);
	     if(seg == SUN3_INVALID_PMEG)
		     continue;

	     sun3_put_segmap(i, SUN3_INVALID_PMEG);
	     pmeg_alloc[seg] = 0;
	     pmeg_ctx[seg] = 0;
	     pmeg_vaddr[seg] = 0;
     }

     sun3_put_context(oldctx);

}
167 
/* Flush a single TLB page. In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page (struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	/* Operate in the VMA's context; restore the old one when done. */
	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	/* Round down to the PMEG covering addr — whole-PMEG granularity. */
	addr &= ~SUN3_PMEG_MASK;
	if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
	{
		/* Release the PMEG's bookkeeping, then invalidate the mapping. */
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap (addr,  SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);

}
/* Flush a range of pages from TLB. */

static inline void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	/* Align the start down to a PMEG boundary; end is checked as-is. */
	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while(start < end)
	{
		if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
		     goto next;
		/*
		 * Only release the bookkeeping when this mm really owns
		 * the PMEG; the segmap entry is invalidated either way.
		 */
		if(pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
	next:
		start += SUN3_PMEG_SIZE;
	}
	/*
	 * NOTE(review): unlike the siblings above, the previous context
	 * is not restored here — confirm whether that is intentional.
	 */
}
216 
/*
 * Flush a range of kernel pages.  No finer-grained primitive is used;
 * everything is flushed and start/end are ignored.
 */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}
221 
/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page (unsigned long addr)
{
	/* Invalidate the segmap entry for the PMEG containing addr. */
	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}
227 
228 #endif
229 
230 #else /* !CONFIG_MMU */
231 
/*
 * flush all user-space atc entries.
 * !CONFIG_MMU stub: there is no TLB, so reaching this is a bug.
 */
static inline void __flush_tlb(void)
{
	BUG();
}
239 
/* !CONFIG_MMU stub: must never be called. */
static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}
244 
#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 * !CONFIG_MMU stub: must never be called.
 */
static inline void flush_tlb_all(void)
{
	BUG();
}
254 
/* !CONFIG_MMU stub: must never be called. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}
259 
/* !CONFIG_MMU stub: must never be called. */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}
264 
/*
 * !CONFIG_MMU stub: must never be called.
 *
 * Fixed for consistency: this variant took a `struct mm_struct *mm`
 * while every MMU variant in this file and the generic kernel TLB
 * interface take a VMA as the first argument.  Callers pass a VMA, so
 * accept one here too; the body is unchanged (BUG()).
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	BUG();
}
270 
/* !CONFIG_MMU stub: must never be called. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}
275 
276 #endif /* CONFIG_MMU */
277 
278 #endif /* _M68K_TLBFLUSH_H */
279