xref: /openbmc/linux/arch/parisc/kernel/cache.c (revision 55fd7e02)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
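/*
 * Illustrative sketch (not part of this file): the lock above is not
 * taken directly here; callers use the purge_tlb_start()/purge_tlb_end()
 * helpers from <asm/tlbflush.h>, which on SMP 64-bit builds behave
 * roughly like this when pa_serialize_tlb_flushes is set:
 *
 *	purge_tlb_start(flags);	// spin_lock_irqsave(&pa_tlb_flush_lock, flags)
 *	pdtlb(addr);		// broadcast purge, one at a time
 *	purge_tlb_end(flags);	// spin_unlock_irqrestore(...)
 *
 * The exact helper bodies live in the header and differ by kernel
 * version; treat the expansion shown here as an assumption.
 */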

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}
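/*
 * Example of the /proc/cpuinfo fragment produced above (values are
 * illustrative only, not taken from real hardware):
 *
 *	I-cache		: 64 KB
 *	D-cache		: 32 KB (WB, direct mapped)
 *	ITLB entries	: 160
 *	DTLB entries	: 160 - shared with ITLB
 */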

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
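	/*
	 * Why the optimized form is equivalent (a short derivation added
	 * editorially, not from the original sources):
	 *
	 *	(1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *	  = cc_line << (4 + cc_shift + cc_block - 1)
	 *	  = cc_line << (3 + cc_block + cc_shift)
	 *
	 * E.g. with illustrative values cc_line = 4, cc_block = 1,
	 * cc_shift = 2, both forms give 4 << 6 = 256 bytes per stride.
	 */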

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same instruction sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
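/*
 * Note on flush vs. purge (general PA-RISC background, stated here as an
 * editorial aside): the "flush" variant (fdc) writes dirty lines back to
 * memory before invalidating them, while the "purge" variant (pdc)
 * simply discards the lines.  Purging is therefore only safe when the
 * cached contents are known to be stale or irrelevant, which is why the
 * flush_cache_* functions below purge only for mms with no context.
 */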

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (parisc_requires_coherency() && old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
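/*
 * Worked example of the SHM_COLOUR check above (illustrative; on parisc
 * SHM_COLOUR is defined in <asm/shmparam.h>, 0x00400000 at the time of
 * writing): two user addresses land in the same cache colour exactly
 * when they agree modulo SHM_COLOUR, e.g.
 *
 *	0x00011000 & (0x00400000 - 1) == 0x00011000
 *	0x00411000 & (0x00400000 - 1) == 0x00011000
 *
 * so a page already flushed at 0x00011000 need not be flushed again at
 * 0x00411000, which is why the loop skips congruent aliases.
 */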

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);
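	/*
	 * Break-even arithmetic behind the threshold above, with made-up
	 * numbers for illustration: if a whole-cache flush costs
	 * alltime = 100000 cycles and flushing the size = 4 MiB kernel
	 * range costs rangetime = 800000 cycles, then range flushing
	 * costs 800000/4MiB cycles per byte, so a full flush wins once a
	 * range exceeds 4 MiB * 100000 / 800000 = 512 KiB.  That is
	 * exactly threshold = size * alltime / rangetime, rounded to an
	 * L1 cache line and capped at the D-cache size.
	 */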

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text, which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > parisc_tlb_flush_threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
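/*
 * Ordering note (editorial, following the "TLB is the engine of
 * coherence" comment in flush_dcache_page() above): the dcache flush
 * comes first, then the kernel TLB entry is purged, so that once the
 * mapping is gone the CPU can no longer speculatively pull the page
 * back into the cache through it.
 */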

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}
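/*
 * Typical call path (a sketch; the actual wrapper lives in
 * <asm/tlbflush.h> and may differ by kernel version): flush_tlb_range()
 * passes the mm's space id as the first argument, roughly
 *
 *	__flush_tlb_range(vma->vm_mm->context, start, end);
 *
 * so the mtsp(sid, 1) above selects the right address space before each
 * per-page purge.
 */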

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		p4d_t *p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud_t *pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}
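/*
 * Illustrative use of get_ptep(), mirroring the loops in
 * flush_cache_mm() and flush_cache_range() below:
 *
 *	pte_t *ptep = get_ptep(mm->pgd, addr);
 *	if (ptep && pfn_valid(pte_pfn(*ptep)))
 *		__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(*ptep)));
 *
 * The walk descends pgd -> p4d -> pud -> pmd -> pte; on configurations
 * where upper levels are folded, the intermediate *_offset() calls
 * collapse to no-ops.
 */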

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
684