/*
 * arch/parisc/kernel/cache.c (revision ba936421)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (e.g., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We need a spinlock around all TLB flushes to ensure
 * this.
 */
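/* Note: the lock is not taken in this file directly; the assumption
 * (based on <asm/tlbflush.h>) is that purge_tlb_start()/purge_tlb_end()
 * acquire it around each pdtlb/pitlb purge when pa_serialize_tlb_flushes
 * is set. */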
DEFINE_SPINLOCK(pa_tlb_flush_lock);

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

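/* Called when a PTE is installed, via the update_mmu_cache() hook (an
 * assumption from the usual parisc wiring; the caller is not in this file).
 * If flush_dcache_page() earlier deferred a flush by setting PG_dcache_dirty
 * while the page had no user mappings, do the kernel-mapping flush now. */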
void
__update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, sizeof(buf), "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
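	/* Worked example with hypothetical PDC values: cc_line = 2,
	 * cc_block = 1, cc_shift = 2 gives a stride of
	 * 2 << (3 + 1 + 2) = 128 bytes between flush-loop steps.
	 * Jim Hull's original form agrees: (1 << (1-1)) * (2 << (4+2)) = 128,
	 * and in general 2^(cc_block-1) * cc_line * 2^(4+cc_shift) equals
	 * cc_line << (3 + cc_block + cc_shift). */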

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits = 0;	/* stays 0 if PDC lacks the call below */

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page_addr(page_address(page));

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent. */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course). */

		flush_tlb_page(mpnt, addr);
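		/* Congruent aliases (same offset modulo SHM_COLOUR) hit the
		 * same cache lines, so one flush per colour suffices; skip
		 * addresses whose colour matches the last one flushed. */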
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (parisc_requires_coherency() && old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

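	/* CR16 is the PA-RISC interval timer; mfctl(16) reads it, so the
	 * flush costs below are measured in processor cycles. */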
	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);

	/*
	 * The threshold computed above isn't very reliable since the
	 * flush times depend greatly on the percentage of dirty lines
	 * in the flush range. Further, the whole-cache time doesn't
	 * include the time to refill lines that aren't in the mm/vma
	 * being flushed. By timing glibc build and checks on mako CPUs,
	 * the following formula seems to work reasonably well. The
	 * value from the timing calculation is too small, and increases
	 * build and check times by almost a factor of two.
	 */
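	/* E.g. (hypothetical numbers): a 768 kB L1 dcache and 4 online
	 * CPUs give threshold2 = 768 kB * 4 = 3 MB, typically well above
	 * the timing-derived estimate. */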
	threshold2 = cache_info.dc_size * num_online_cpus();
	if (threshold2 > threshold)
		threshold = threshold2;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text, which
	 * has been mapped with huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

476 
477 extern void purge_kernel_dcache_page_asm(unsigned long);
478 extern void clear_user_page_asm(void *, unsigned long);
479 extern void copy_user_page_asm(void *, void *, unsigned long);
480 
481 void flush_kernel_dcache_page_addr(void *addr)
482 {
483 	unsigned long flags;
484 
485 	flush_kernel_dcache_page_asm(addr);
486 	purge_tlb_start(flags);
487 	pdtlb(SR_KERNEL, addr);
488 	purge_tlb_end(flags);
489 }
490 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
491 
492 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
493 	struct page *pg)
494 {
495        /* Copy using kernel mapping.  No coherency is needed (all in
496 	  kunmap) for the `to' page.  However, the `from' page needs to
497 	  be flushed through a mapping equivalent to the user mapping
498 	  before it can be accessed through the kernel mapping. */
499 	preempt_disable();
500 	flush_dcache_page_asm(__pa(vfrom), vaddr);
501 	copy_page_asm(vto, vfrom);
502 	preempt_enable();
503 }
504 EXPORT_SYMBOL(copy_user_page);
505 
506 /* __flush_tlb_range()
507  *
508  * returns 1 if all TLBs were flushed.
509  */
510 int __flush_tlb_range(unsigned long sid, unsigned long start,
511 		      unsigned long end)
512 {
513 	unsigned long flags;
514 
515 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
516 	    end - start >= parisc_tlb_flush_threshold) {
517 		flush_tlb_all();
518 		return 1;
519 	}
520 
521 	/* Purge TLB entries for small ranges using the pdtlb and
522 	   pitlb instructions.  These instructions execute locally
523 	   but cause a purge request to be broadcast to other TLBs.  */
524 	while (start < end) {
525 		purge_tlb_start(flags);
526 		mtsp(sid, SR_TEMP1);
527 		pdtlb(SR_TEMP1, start);
528 		pitlb(SR_TEMP1, start);
529 		purge_tlb_end(flags);
530 		start += PAGE_SIZE;
531 	}
532 	return 0;
533 }
534 
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

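/* Walk pgd -> p4d -> pud -> pmd -> pte for @addr, returning NULL if any
 * level is empty.  There is no matching pte_unmap(); that is harmless here
 * on the assumption that parisc does not select CONFIG_HIGHPTE, so
 * pte_offset_map() creates no temporary kernel mapping. */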
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		p4d_t *p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud_t *pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

562 
563 static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
564 			      unsigned long start, unsigned long end)
565 {
566 	unsigned long addr, pfn;
567 	pte_t *ptep;
568 
569 	for (addr = start; addr < end; addr += PAGE_SIZE) {
570 		ptep = get_ptep(mm->pgd, addr);
571 		if (ptep) {
572 			pfn = pte_pfn(*ptep);
573 			flush_cache_page(vma, addr, pfn);
574 		}
575 	}
576 }
577 
578 void flush_cache_mm(struct mm_struct *mm)
579 {
580 	struct vm_area_struct *vma;
581 
582 	/* Flushing the whole cache on each cpu takes forever on
583 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
584 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
585 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
586 		if (mm->context.space_id)
587 			flush_tlb_all();
588 		flush_cache_all();
589 		return;
590 	}
591 
592 	for (vma = mm->mmap; vma; vma = vma->vm_next)
593 		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
594 }
595 
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context.space_id)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, vma->vm_mm, start, end);
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context.space_id)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

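/* Counterpart to flush_kernel_vmap_range() above: per the generic contract
 * in Documentation/core-api/cachetlb.rst, callers use this after a device
 * has written into a vmap range, so the CPU does not read stale dcache
 * lines.  The slow path purges (invalidates without writeback) rather
 * than flushing. */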
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
656