/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999 Helge Deller (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>

#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>

int split_tlb;
int dcache_stride;
int icache_stride;
EXPORT_SYMBOL(dcache_stride);


#if defined(CONFIG_SMP)
/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
EXPORT_SYMBOL(pa_tlb_lock);
#endif

struct pdc_cache_info cache_info;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info;
#endif

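/* On SMP the full cache flushes must reach every CPU, so these wrappers
 * fan the local flush routines out with on_each_cpu().  (Uniprocessor
 * builds are expected to map these operations straight to the *_local
 * variants.)
 */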
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu((void (*)(void *))flush_instruction_cache_local, NULL, 1, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local();
	flush_data_cache_local();
}
EXPORT_SYMBOL(flush_cache_all_local);

/* flushes EVERYTHING (tlb & cache) */

void
flush_all_caches(void)
{
	flush_cache_all();
	flush_tlb_all();
}
EXPORT_SYMBOL(flush_all_caches);

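/*
 * Called when a user PTE is established.  If flush_dcache_page() deferred
 * a flush by setting PG_dcache_dirty (because the page had no user mapping
 * at the time), flush the kernel mapping now so the new user mapping sees
 * coherent data.
 */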
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page = pte_page(pte);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {

		flush_kernel_dcache_page(page_address(page));
		clear_bit(PG_dcache_dirty, &page->flags);
	}
}

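/* Report the cache and TLB geometry obtained from PDC; used by the
 * /proc/cpuinfo show routine. */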
void
show_cache_info(struct seq_file *m)
{
	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %d-way associative)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		(cache_info.dc_conf.cc_assoc)
	);

	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

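/*
 * Query PDC for the cache and TLB geometry, record whether the TLB must be
 * flushed as separate I and D halves, and derive the strides used by the
 * assembly flush loops in pacache.S.
 */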
void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d assoc %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_assoc);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d assoc %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_assoc);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 */
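	/* Both forms compute the same stride:
	 * (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *	== cc_line << (cc_block - 1 + 4 + cc_shift)
	 *	== cc_line << (3 + cc_block + cc_shift), as used below.
	 */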
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

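/*
 * Pick the CPU-family-specific method for turning off space-register
 * hashing and hand the real work to disable_sr_hashing_asm() in pacache.S.
 */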
void disable_sr_hashing(void)
{
	int srhash_type;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);
}

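/*
 * Make a page-cache page coherent with its user mappings.  If the page is
 * not mapped into user space yet, just mark it PG_dcache_dirty and let
 * update_mmu_cache() flush it later; otherwise flush the kernel mapping
 * and at most one (congruent) user mapping.
 */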
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	unsigned long addr;
	pgoff_t pgoff;
	pte_t *pte;
	unsigned long pfn = page_to_pfn(page);


	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page_address(page));

	if (!mapping)
		return;

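	/* page->index counts in PAGE_CACHE_SIZE units; convert it to PAGE_SIZE
	 * units to match vm_pgoff in the i_mmap prio tree lookup below. */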
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* Flush instructions produce non access tlb misses.
		 * On PA, we nullify these instructions rather than
		 * taking a page fault if the pte doesn't exist.
		 * This is just for speed.  If the page translation
		 * isn't there, there's no point exciting the
		 * nadtlb handler into a nullification frenzy */

		if (!(pte = translation_exists(mpnt, addr)))
			continue;

		/* make sure we really have this page: the private
		 * mappings may cover this area but have COW'd this
		 * particular page */
		if (pte_pfn(*pte) != pfn)
			continue;

		__flush_cache_page(mpnt, addr);

		break;
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

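/*
 * Clear a user page via the assembly helper in pacache.S.  The
 * purge_tlb_start()/purge_tlb_end() pair serializes the helper's TLB
 * manipulation against other purges (on SMP this is the pa_tlb_lock
 * described at the top of this file).
 */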
void clear_user_page_asm(void *page, unsigned long vaddr)
{
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start();
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end();
}

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold = FLUSH_THRESHOLD;
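/* Ranged flush helpers are expected to compare a region's length against
 * this threshold and fall back to a whole-cache flush when flushing line
 * by line would cost more.  parisc_setup_cache_timing() below refines the
 * boot-time default by measurement: it times a whole data-cache flush
 * against a line-by-line flush of the kernel image using the CR16
 * interval timer (read with mfctl(16)) and scales the ratio into a byte
 * threshold. */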

void parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	extern char _text;	/* start of kernel code, defined by linker */
	extern char _end;	/* end of BSS, defined by linker */
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(&_end - &_text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)&_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

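	/* Round up to a whole L1 cache line and never let the threshold be zero. */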
	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	printk("Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
367