/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * CP0 hazard avoidance: enough no-ops that a CP0 write has taken
 * effect before a dependent TLB operation or CP0 read executes.
 */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")

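/*
 * Flush every TLB entry above the wired ones.  Each victim index is
 * refilled with a unique VPN2 in unmapped CKSEG0 space and all-zero
 * (invalid) EntryLo values, so it can never match a real access.
 */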
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/*
		 * Make sure all entries differ.  If they're not different
		 * MIPS32 will take revenge ...
		 */
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

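/*
 * Flushing a whole address space is cheap on MIPS: retiring the mm's
 * ASID is enough, since stale entries tagged with the old ASID can no
 * longer match.
 */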
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);
}

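/*
 * For a small range, probe each even/odd page pair under the mm's ASID
 * and invalidate any entry that hits; once the range covers more than
 * half the TLB it is cheaper to drop the whole context instead.
 */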
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(CKSEG0 +
				                 (idx << (PAGE_SHIFT + 1)));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

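/*
 * Same probe-and-invalidate walk as local_flush_tlb_range(), but for
 * global kernel mappings, so no ASID is mixed into EntryHi.
 */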
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}

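/*
 * Probe for the entry mapping a single user page under the mm's ASID
 * and, if it is present, knock it out the same way.
 */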
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		BARRIER;
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	page &= (PAGE_MASK << 1);
	oldpid = read_c0_entryhi();
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);

	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
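	/*
	 * Each TLB entry maps an even/odd pair of virtual pages via
	 * EntryLo0/EntryLo1, so clear bit PAGE_SHIFT as well when
	 * forming the VPN2 below.
	 */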
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

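	/*
	 * Load the PTE pair into EntryLo0/EntryLo1.  In the 32-bit PTE
	 * case the shift by 6 converts the software PTE to EntryLo
	 * format: the hardware G, V, D and cache-attribute bits move
	 * down into bits 5:0 and the PFN lines up at bit 6.
	 */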
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	write_c0_entryhi(address | pid);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct *vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
}
#endif

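/*
 * Install a permanent mapping at the next free wired index and bump
 * c0_wired so that random replacement (tlbwr) never evicts it.
 */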
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and PageMask registers */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

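/* Temporary entries are handed out top-down from the last TLB index. */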
static int temp_tlb_entry __initdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and PageMask registers */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	return ret;
}

static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU the Config 1
	 * register is not supported, so we assume an R4k-style TLB.
	 * CPU probing has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

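	/* Config1[30:25] (MMU Size) holds the number of TLB entries minus one. */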
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;
	local_flush_tlb_all();

	build_tlb_refill_handler();
}