/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
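
/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * UNIQUE_ENTRYHI(3) == CKSEG0 + (3 << 13), a distinct 8K-aligned VPN2 in
 * unmapped kernel space for TLB index 3, so an indexed write can never
 * create a duplicate entry for the hardware to trip over.
 */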

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
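
/*
 * Note that on SMTC the ENTER_CRITICAL() macro opens a block (and suspends
 * the other VPEs via dvpe()) which the matching EXIT_CRITICAL() closes
 * again, so the two must always be used as a bracketing pair in the same
 * function, as every caller below does:
 *
 *	unsigned long flags;
 *
 *	ENTER_CRITICAL(flags);
 *	... TLB manipulation ...
 *	EXIT_CRITICAL(flags);
 */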

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
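
/*
 * On Loongson-2, writing 4 to the CP0 diagnostic register is understood to
 * invalidate the ITLB; FLUSH_ITLB_VM() only bothers doing so for executable
 * mappings, since only instruction fetches are translated by the ITLB.
 */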

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
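
/*
 * Note: when a whole address space has to go (e.g. via flush_tlb_mm()), we
 * don't probe and invalidate each entry tagged with this mm's ASID;
 * drop_mmu_context() simply hands the mm a fresh ASID on this CPU, so the
 * stale entries can never match again and are recycled lazily.
 */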

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}
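
/*
 * Each R4k-style TLB entry maps an even/odd pair of pages (EntryLo0 and
 * EntryLo1 under a single VPN2), which is why the range above is rounded to
 * PAGE_SIZE << 1 and walked in double-page steps: one probe/invalidate per
 * entry rather than per page.  E.g. with 4K pages an aligned 24K range is
 * covered by three probes.
 */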

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
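
/*
 * Since local_flush_tlb_one() only handles entries with the global (G) bit
 * set, the probe matches regardless of the ASID currently in EntryHi; the
 * old EntryHi is saved and restored purely so the live ASID of the current
 * task is not clobbered.
 */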

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what's needed to work
 * around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle a debugger faulting a page in on behalf of the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
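		/*
		 * With the usual EntryLo layout the PFN field starts at bit 6
		 * (physaddr >> 12, shifted left by 6), so adding
		 * HPAGE_SIZE >> 7, i.e. (HPAGE_SIZE / 2) >> 12 << 6, points
		 * EntryLo1 at the second half of the huge page.
		 */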

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and pagemask */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
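
/*
 * Sketch of a typical use (values hypothetical): platform setup code can pin
 * a translation that must never be evicted, e.g. a flat 16MB window onto an
 * I/O region:
 *
 *	add_wired_entry(lo0, lo1, vaddr, PM_16M);
 *
 * where lo0/lo1 are the EntryLo values for the even/odd halves of the pair.
 * The entry is written at the current wired index and c0_wired is bumped,
 * so random replacement skips it from then on.
 */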

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	ENTER_CRITICAL(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	EXIT_CRITICAL(flags);

	return mask == PM_HUGE_MASK;
}
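
/*
 * The probe above relies on unimplemented PageMask bits reading back as
 * zero: only if PM_HUGE_MASK survives the write/read round trip can the
 * core actually express the huge page size, and only then is THP usable.
 */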

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
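
/*
 * "ntlb=" is a kernel command line parameter; e.g. booting with ntlb=32
 * (an illustrative value) makes tlb_init() below wire off all but 32
 * entries, which is mostly useful as a debugging / sizing aid.
 */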

void __cpuinit tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * addresses.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}