xref: /openbmc/linux/arch/parisc/kernel/cache.c (revision cd99b9eb)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
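/*
 * A minimal sketch of how this lock is consumed. The real helpers live
 * in asm/tlbflush.h; the macro behaviour below is paraphrased from the
 * purge_tlb_start()/purge_tlb_end() call sites further down and should
 * be treated as an assumption, not a quote of that header:
 *
 *	unsigned long flags;
 *
 *	purge_tlb_start(flags);	// takes pa_tlb_flush_lock when
 *				// pa_serialize_tlb_flushes is set,
 *				// else just disables local interrupts
 *	pdtlb(SR_KERNEL, addr);	// at most one PxTLB broadcast in flight
 *	purge_tlb_end(flags);
 */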

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}

/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

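/*
 * Deferred-flush handshake: flush_dcache_folio() below sets
 * PG_dcache_dirty instead of flushing when a page-cache folio has no
 * user mappings yet; by the time a PTE for the page is installed,
 * this hook runs and performs the postponed kernel dcache flush.
 */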
void __update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	unsigned int nr;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	pfn = folio_pfn(folio);
	nr = folio_nr_pages(folio);
	if (folio_flush_mapping(folio) &&
	    test_bit(PG_dcache_dirty, &folio->flags)) {
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
		clear_bit(PG_dcache_dirty, &folio->flags);
	} else if (parisc_requires_coherency())
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
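	/*
	 * Why the two forms agree (plain shift algebra, not taken from
	 * the archived threads):
	 *   (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *	= cc_line << (4 + cc_shift + cc_block - 1)
	 *	= cc_line << (3 + cc_block + cc_shift)
	 * which is exactly the CAFL_STRIDE() expression below.
	 */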
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same instruction sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	void *kaddr = page_address(page);

	for (;;) {
		flush_kernel_dcache_page_addr(kaddr);
		flush_kernel_icache_page(kaddr);
		if (--nr == 0)
			break;
		kaddr += PAGE_SIZE;
	}
}

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

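/*
 * A user mapping can hold stale cache lines only if it is present,
 * has actually been referenced, and is cacheable; uncached
 * (_PAGE_NO_CACHE) and never-accessed PTEs are skipped by the flush
 * loops below.
 */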
static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);
	struct vm_area_struct *vma;
	unsigned long addr, old_addr = 0;
	void *kaddr;
	unsigned long count = 0;
	unsigned long i, nr, flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &folio->flags);
		return;
	}

	nr = folio_nr_pages(folio);
	kaddr = folio_address(folio);
	for (i = 0; i < nr; i++)
		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

	if (!mapping)
		return;

	pgoff = folio->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing.
	 */
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long offset = pgoff - vma->vm_pgoff;
		unsigned long pfn = folio_pfn(folio);

		addr = vma->vm_start;
		nr = folio_nr_pages(folio);
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			addr += offset * PAGE_SIZE;
		}
		if (addr + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - addr) / PAGE_SIZE;

		if (parisc_requires_coherency()) {
			for (i = 0; i < nr; i++) {
				pte_t *ptep = get_ptep(vma->vm_mm,
							addr + i * PAGE_SIZE);
				if (!ptep)
					continue;
				if (pte_needs_flush(*ptep))
					flush_user_cache_page(vma,
							addr + i * PAGE_SIZE);
				/* Optimise accesses to the same table? */
				pte_unmap(ptep);
			}
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush only alias mapping. This guarantees that
			 * the page is no longer in the cache for any
			 * process, nor may it be speculatively read
			 * back in (until the user or kernel specifically
			 * accesses it, of course).
			 */
			for (i = 0; i < nr; i++)
				flush_tlb_page(vma, addr + i * PAGE_SIZE);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				for (i = 0; i < nr; i++)
					__flush_cache_page(vma,
						addr + i * PAGE_SIZE,
						(pfn + i) * PAGE_SIZE);
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(vma->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, vma->vm_file);
				if (nr == folio_nr_pages(folio))
					old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);	/* CR16 interval timer as cycle counter */
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

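	/*
	 * Break-even estimate: a range flush costs about rangetime/size
	 * cycles per byte, so a range of size * alltime / rangetime
	 * bytes costs as much as one whole-cache flush; past that point,
	 * flushing everything is cheaper.
	 */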
	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

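/*
 * Flush the kernel mapping of a page, then purge its kernel TLB entry:
 * per the "TLB is the engine of coherence" note in flush_dcache_folio()
 * above, removing the translation keeps the CPU from speculatively
 * pulling the line straight back into the cache.
 */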
void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	bool needs_flush = false;
	pte_t *ptep;

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	ptep = get_ptep(vma->vm_mm, vmaddr);
	if (ptep) {
		needs_flush = pte_needs_flush(*ptep);
		pte_unmap(ptep);
	}
	if (needs_flush)
		flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}
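/*
 * A hedged sketch of the caller side (the real flush_tlb_range()
 * wrapper lives in asm/tlbflush.h; the context field name below is an
 * assumption, not quoted from that header):
 *
 *	#define flush_tlb_range(vma, start, end) \
 *		__flush_tlb_range((vma)->vm_mm->context.space_id, \
 *				  start, end)
 */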

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		bool needs_flush = false;
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep) {
			needs_flush = pte_needs_flush(*ptep);
			pfn = pte_pfn(*ptep);
			pte_unmap(ptep);
		}
		if (needs_flush) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		if (vma->vm_flags & VM_SHARED)
			flush_data_cache();
		else
			flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	unsigned long start, end;
	ASM_EXCEPTIONTABLE_VAR(error);

	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	end = addr + bytes;

	if (cache & DCACHE) {
		start = addr;
		/* fdc,m: flush a data cache line, post-incrementing
		   start by the dcache stride. */
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fdc,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (dcache_stride), "i" (SR_USER));
	}

	if (cache & ICACHE && error == 0) {
		start = addr;
		/* fic,m: flush an instruction cache line, post-incrementing
		   start by the icache stride. */
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fic,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (icache_stride), "i" (SR_USER));
	}

	return error;
}
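/*
 * Hypothetical userspace sketch (not part of this file): a JIT would
 * typically write instructions through the data cache and then use
 * this syscall to make them visible to instruction fetch. DCACHE and
 * ICACHE come from asm/cachectl.h; the syscall number macro and the
 * emit_code() helper are assumptions for illustration only.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	emit_code(buf, len);
 *	if (syscall(__NR_cacheflush, (unsigned long)buf, len,
 *		    DCACHE | ICACHE))
 *		perror("cacheflush");
 *	((void (*)(void))buf)();
 */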