/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON2/3 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

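/*
 * The ITLB only holds instruction translations, so it only needs
 * flushing when the mapping concerned is executable.
 */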
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}

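/*
 * Flush the entire TLB of the local CPU.  On cores with the tlbinv
 * facility the VTLB and each FTLB set are invalidated with tlbinvf;
 * otherwise every entry above the wired ones is overwritten with a
 * unique, unmapped EntryHi value so that no two entries match.
 */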
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/*
 * All entries common to a mm share an ASID.  To effectively flush
 * these entries, we just bump the ASID.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

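/*
 * Flush a user address range for the given mm on the local CPU.  Small
 * ranges are flushed one (even, odd) page pair at a time by probing for
 * each VPN2/ASID combination; ranges larger than a fraction of the TLB
 * are handled more cheaply by simply assigning the mm a new ASID.
 */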
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}

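/*
 * Flush a range of kernel (global) mappings on the local CPU.  As
 * above, small ranges are probed and invalidated page pair by page
 * pair, while large ranges fall back to local_flush_tlb_all().
 */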
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}

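/*
 * Flush a single page of vma->vm_mm from the local TLB: probe for the
 * (VPN2, ASID) pair and invalidate the matching entry, if one exists.
 */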
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	flush_itlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(): one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
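	/*
	 * idx now holds the index of a matching TLB entry, or is
	 * negative if the probe missed; in the latter case a random
	 * write is used below instead of an indexed one.
	 */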
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}

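/*
 * Install a wired (never randomly replaced) TLB entry.  The wired
 * register is bumped so the new entry lies below the random
 * replacement range, and the previous EntryHi/PageMask contents are
 * restored once the entry has been written.
 */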
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

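/*
 * Probe for huge page support by writing PM_HUGE_MASK into c0_pagemask
 * and checking whether the value sticks.
 */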
int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

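/*
 * "ntlb=" kernel command line parameter: artificially restrict the
 * number of usable TLB entries; the excess entries are wired off in
 * tlb_init() below.
 */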
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

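/*
 * Boot-time TLB setup: configure the MMU, apply any "ntlb="
 * restriction by wiring off the excess entries, and install the TLB
 * refill exception handler.
 */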
void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

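/*
 * CPU power management notifier: the TLB configuration is lost when a
 * core powers down, so reprogram it when the CPU exits a low-power
 * state (or fails to enter one).
 */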
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);