/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

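/*
 * Low-level per-page flush and purge primitives, implemented in
 * assembly in arch/parisc/kernel/pacache.S.
 */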
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

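/*
 * Called after a PTE has been installed or updated.  If the kernel
 * mapping of the underlying page was dirtied while the page had no
 * user mapping (PG_dcache_dirty), flush it now so the new user
 * mapping starts out coherent with the kernel's view.
 */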
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

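/* Report cache and TLB geometry, e.g. for /proc/cpuinfo. */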
void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

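/*
 * Query PDC firmware for the cache and TLB geometry, derive the
 * dcache/icache flush strides, and decide whether the I- and D-TLBs
 * must be purged separately (split_tlb).
 */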
void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

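/*
 * Turn off space-register hashing for the current CPU type so that
 * virtual cache aliasing stays under software control, then verify
 * via PDC that it is really off.
 */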
void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

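/*
 * Flush one user page through a congruent flush-only alias mapping:
 * the data cache always, and the instruction cache too when the VMA
 * is executable.
 */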
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

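/*
 * Make all user mappings of a page-cache page coherent with the
 * kernel's view of it.  If the page is not currently mapped anywhere,
 * just mark it dirty and defer the flush until a mapping appears.
 */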
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;

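/*
 * Using the CR16 interval timer, measure how long a full data-cache
 * flush takes versus a ranged flush over the kernel image, and derive
 * the break-even size beyond which flushing everything is cheaper.
 * Then do the same for the TLB.
 */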
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text,
	 * which has been mapped with huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > parisc_tlb_flush_threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

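/*
 * Flush a page's data cache lines through its kernel mapping, then
 * purge the kernel TLB entry so the CPU cannot speculatively pull the
 * page back into the cache through that mapping.
 */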
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

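/*
 * Total user address space mapped by an mm; used to decide whether a
 * ranged flush would cost more than flushing the whole cache.
 */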
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

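/*
 * Lockless page-table walk; returns the PTE for addr, or NULL if any
 * intermediate level is not present.
 */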
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

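/*
 * flush_kernel_vmap_range() writes dirty lines back and invalidates
 * them; invalidate_kernel_vmap_range() discards the lines without
 * writeback, for buffers whose contents are about to be replaced,
 * e.g. by incoming DMA.
 */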
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
674