xref: /openbmc/linux/arch/parisc/kernel/cache.c (revision d2999e1b)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
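
/* All TLB purges in this file go through the purge_tlb_start()/
 * purge_tlb_end() helpers so that pa_tlb_lock serializes the broadcasts.
 * As a rough sketch only (the real definitions live in <asm/tlbflush.h>
 * and may differ):
 *
 *	#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
 *	#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)
 */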

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}
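
/* Note on the PG_dcache_dirty handshake above: flush_dcache_page() below
 * defers the flush by setting PG_dcache_dirty when a page-cache page has
 * no user mappings yet, and update_mmu_cache() performs the deferred
 * flush once the first user mapping is established.  Condensed sketch of
 * the two sides, as implemented in this file:
 *
 *	flush_dcache_page(page):		// no user mapping yet
 *		set_bit(PG_dcache_dirty, &page->flags);
 *
 *	update_mmu_cache(vma, addr, ptep):	// first mapping appears
 *		if (test_bit(PG_dcache_dirty, &page->flags))
 *			flush_kernel_dcache_page_addr(pfn_va(pfn));
 */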

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
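
	/* Worked example (hypothetical PDC values, for illustration only):
	 * with cc_line = 2, cc_block = 1 and cc_shift = 2, Jim Hull's form
	 * gives (1 << (1-1)) * (2 << (4+2)) = 1 * 128 = 128, and the
	 * optimized CAFL_STRIDE gives 2 << (3+1+2) = 2 << 6 = 128 bytes;
	 * the two agree because
	 * (1 << (b-1)) * (l << (4+s)) == l << (3+b+s) for all b >= 1.
	 */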

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent. */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculatively load any page that
		 * has a TLB mapping, so here we kill the mapping and
		 * then flush the page along a special flush-only alias
		 * mapping.  This guarantees that the page is no longer
		 * in the cache for any process, nor may it be
		 * speculatively read back in (until the user or kernel
		 * specifically accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
					old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
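
/* Congruence check illustrated (assuming, as on this vintage of parisc,
 * that SHM_COLOUR is 0x00400000, i.e. 4 MB -- see <asm/shmparam.h>):
 * user addresses 0x00512000 and 0x20112000 both have offset 0x112000
 * within their 4 MB colour, so (addr & (SHM_COLOUR - 1)) matches, they
 * alias equivalently in the cache, and one flush covers both.  Addresses
 * whose offsets differ are the "INEQUIVALENT ALIASES" warned about above.
 */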

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold
		+ L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
		parisc_cache_flush_threshold, num_online_cpus());
}
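
/* Worked example with made-up numbers: if mfctl(16) (the CR16 interval
 * timer, which counts at the processor clock on PA-RISC) shows a whole
 * dcache flush costing 100000 cycles while flushing a 2 MB kernel text
 * range costs 400000 cycles, then
 *	threshold = 0x200000 * 100000 / 400000 = 0x80000 (512 KB),
 * i.e. ranges under 512 KB are cheaper to flush line by line, and
 * anything larger falls back to a whole-cache flush.
 */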

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
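
/* Why flush_kernel_dcache_page_addr() also purges the kernel TLB entry:
 * as the comment in flush_dcache_page() above notes, the CPU may
 * speculatively pull a page back into the cache through any live TLB
 * mapping.  Purging the kernel translation with pdtlb_kernel() after the
 * dcache flush closes that window for the kernel mapping as well (our
 * reading of the code, not an authoritative statement of the hardware
 * rules).
 */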

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context.  */

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		purge_tlb_start(flags);
		mtsp(sid, 1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}
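
/* Example of the npages rounding in __flush_tlb_range(), with 4 KB pages:
 * for start = 0x2800 and end = 0x5800, start & PAGE_MASK = 0x2000, so
 * npages = ((0x5800 - 0x2000) + 0xfff) >> 12 = 0x47ff >> 12 = 4, i.e.
 * the partially covered first and last pages are both purged.
 */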

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}
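
/* get_ptep() above is a minimal, lockless software walk of the page
 * tables (pgd -> pud -> pmd -> pte), returning NULL at the first absent
 * level.  Typical use by the flush routines below, sketched for
 * illustration:
 *
 *	pte_t *ptep = get_ptep(mm->pgd, addr);
 *	if (ptep && pfn_valid(pte_pfn(*ptep)))
 *		__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(*ptep)));
 *
 * The callers only use the result to pick addresses to flush, so a racy
 * walk appears to be acceptable here.
 */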

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}
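
/* A note on the mfsp(3) comparison above and in flush_cache_range():
 * on PA-RISC, space register %sr3 holds the space id of the current
 * user address space, so mm->context == mfsp(3) tests whether mm is the
 * address space we are running in right now; only then can the
 * user-virtual flush_user_*_range_asm() helpers be used directly
 * instead of walking the page tables.
 */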

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}
585