xref: /openbmc/linux/arch/mips/mm/tlb-r4k.c (revision 930beb5a)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

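/*
 * Under SMTC the critical section must also quiesce the other virtual
 * processing elements: dvpe() stops the other VPEs on the core and
 * evpe() restarts them, on top of the usual local IRQ save/restore.
 */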
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}

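/*
 * Flush the entire local TLB.  Where the core supports the tlbinv
 * family of instructions, the VTLB and each FTLB set are invalidated
 * with tlbinvf(); otherwise every entry above the wired limit is
 * overwritten with a unique, impossible VPN2 value.
 */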
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	flush_itlb();
	EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/*
 * All entries common to a mm share an ASID.  To effectively flush
 * these entries, we just bump the ASID.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

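/*
 * Flush a user address range.  Small ranges are probed and invalidated
 * page pair by page pair under the mm's ASID; anything above the size
 * threshold simply drops the mm's context so that a fresh ASID is
 * allocated on next use.
 */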
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		EXIT_CRITICAL(flags);
	}
}

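/*
 * Flush a kernel address range.  Same idea as above, but there is no
 * ASID to recycle, so large ranges fall back to local_flush_tlb_all().
 */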
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	EXIT_CRITICAL(flags);
}

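/*
 * Flush a single user page: probe for it under the mm's current ASID
 * and, if present, overwrite the matching entry with a unique,
 * impossible VPN2 value.
 */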
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		flush_itlb_vm(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	flush_itlb();
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* This could be a huge page. */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	flush_itlb_vm(vma);
	EXIT_CRITICAL(flags);
}

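/*
 * Install a permanent (wired) translation, e.g. for a fixed device
 * mapping.  The entry is written at the current wired index and the
 * wired limit is bumped so that it survives later flushes.  An
 * illustrative (hypothetical) caller mapping one even/odd page pair
 * might do:
 *
 *	add_wired_entry(pte_to_entrylo(pte_val(pte0)),
 *			pte_to_entrylo(pte_val(pte1)),
 *			vaddr & (PAGE_MASK << 1), PM_DEFAULT_MASK);
 *
 * where pte0/pte1/vaddr are the caller's own values, not names used in
 * this file.
 */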
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

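/*
 * Probe for huge page support: write the huge page mask into
 * c0_pagemask and see whether it reads back unchanged.
 */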
int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	ENTER_CRITICAL(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	EXIT_CRITICAL(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

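/*
 * "ntlb=N" kernel parameter: artificially restrict the TLB to N usable
 * entries by wiring off the rest (handled in tlb_init() below).
 */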
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

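/*
 * Per-CPU TLB setup: establish the fixed page mask, clear the wired
 * count, enable RIXI/ELPA where available, flush whatever the firmware
 * left behind and finally build the TLB refill handler.
 */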
void tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
468