xref: /openbmc/linux/arch/parisc/kernel/cache.c (revision afc98d90)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

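/* The *_asm flush/purge primitives declared below (and exported further
 * down in this file) are implemented in assembly in
 * arch/parisc/kernel/pacache.S. */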
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

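/*
 * SMP wrappers: broadcast the per-CPU cache flushes to every online CPU.
 * (On UP kernels flush_data_cache()/flush_instruction_cache() are presumably
 * provided by <asm/cacheflush.h> as aliases for the *_local variants, which
 * is why these definitions are compiled only for SMP.)
 */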
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

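/*
 * Called by the generic MM code after a PTE has been installed for the
 * faulting address.  If flush_dcache_page() deferred a writeback earlier
 * (PG_dcache_dirty set while the page had no user mappings), do that
 * writeback now through the kernel mapping so the new user mapping sees
 * coherent data.
 */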
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in a C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

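/* seq_file back-end for the cache/TLB lines of /proc/cpuinfo (invoked from
 * the parisc show_cpuinfo() handler). */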
void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

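/*
 * Early (arch) init: ask PDC firmware for the cache and TLB geometry, decide
 * whether the I- and D-TLB have to be purged separately (split_tlb), and
 * derive the strides used by the assembly flush loops from the cache
 * configuration words.
 */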
void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
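	/*
	 * Note: CAFL_STRIDE above is just Jim Hull's formula with the two
	 * shifts folded together:
	 *   (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *     == cc_line << (cc_block - 1 + 4 + cc_shift)
	 *     == cc_line << (3 + cc_block + cc_shift)
	 */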

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

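/*
 * Turn off space-register hashing for this CPU family (the required
 * instruction sequence differs per CPU type), then use PDC to confirm that
 * the number of space-ID bits still in use is zero.  Linux relies on
 * predictable cache indexing for its equivalent-alias mapping scheme, which
 * is presumably why hashing has to be off.
 */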
void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

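/*
 * Flush one page, given by physical address and user virtual address, from
 * the D-cache, and from the I-cache as well when the mapping is executable.
 * Preemption is disabled around the asm helpers, which (as the comments in
 * flush_dcache_page() below note) flush through a special alias mapping.
 */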
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

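/*
 * Called when the kernel has dirtied a page-cache page that may also be
 * mapped into user space.  If the page is not currently mapped anywhere we
 * just mark it PG_dcache_dirty and defer the writeback to update_mmu_cache();
 * otherwise flush the kernel view and every (congruent) user alias.
 */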
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping and then flush
		 * the page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read back in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

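/*
 * Benchmark a full data-cache flush against a range flush over the kernel
 * image, using the interval timer (cr16, read via mfctl(16)), and set
 * parisc_cache_flush_threshold to the break-even size (rounded up to a
 * cache line and capped at the D-cache size).  Ranges below the threshold
 * are flushed by address; larger ones just flush the whole cache.
 */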
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

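/*
 * Write back one page through its kernel mapping and then purge the kernel
 * TLB entry, so that (per the "TLB is the engine of coherence" rule above)
 * the CPU cannot speculatively pull the lines back into the cache.
 */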
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
       /* Copy using kernel mapping.  No coherency is needed (all in
	  kunmap) for the `to' page.  However, the `from' page needs to
	  be flushed through a mapping equivalent to the user mapping
	  before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context.  */

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

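/*
 * Purge an address range in the given space (sid) from the TLB.  The loop
 * runs inside purge_tlb_start()/purge_tlb_end(), which are expected to
 * serialise on pa_tlb_lock because of the single-outstanding-PxTLB rule
 * noted at the top of this file; for large ranges (>= 512 pages) a full
 * TLB flush is cheaper than purging page by page.
 */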
void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		purge_tlb_start(flags);
		mtsp(sid, 1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

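/*
 * Walk the page tables by hand (pgd -> pud -> pmd -> pte) for one address;
 * returns NULL as soon as a level is empty.  Used by the flush routines
 * below when the mm being flushed is not the currently active one, so the
 * flush has to go through the page's physical address instead.
 */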
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

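/*
 * Three strategies, cheapest applicable one wins: flush everything if the
 * mm is larger than the measured threshold; flush by user virtual range if
 * the mm is the one currently live in the space registers (mm->context ==
 * mfsp(3)); otherwise walk the page tables and flush each present page
 * through its physical address.
 */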
void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}

#ifdef CONFIG_PARISC_TMPALIAS
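
/*
 * With CONFIG_PARISC_TMPALIAS, user-page clear/copy goes through the
 * temporary-alias region rather than plain kernel mappings, so (as the
 * comments below explain) the destination page itself does not need an
 * extra flush; only stale kernel translations are purged first.
 */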

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *vto;
	unsigned long flags;

	/* Clear using TMPALIAS region.  The page doesn't need to
	   be flushed but the kernel mapping needs to be purged.  */

	vto = kmap_atomic(page);

	/* The PA-RISC 2.0 Architecture book states on page F-6:
	   "Before a write-capable translation is enabled, *all*
	   non-equivalently-aliased translations must be removed
	   from the page table and purged from the TLB.  (Note
	   that the caches are not required to be flushed at this
	   time.)  Before any non-equivalent aliased translation
	   is re-enabled, the virtual address range for the writeable
	   page (the entire page) must be flushed from the cache,
	   and the write-capable translation removed from the page
	   table and purged from the TLB."  */

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	purge_tlb_end(flags);
	preempt_disable();
	clear_user_page_asm(vto, vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	unsigned long flags;

	/* Copy using TMPALIAS region.  This has the advantage
	   that the `from' page doesn't need to be flushed.  However,
	   the `to' page must be flushed in copy_user_page_asm since
	   it can be used to bring in executable code.  */

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	pdtlb_kernel(vfrom);
	purge_tlb_end(flags);
	preempt_disable();
	copy_user_page_asm(vto, vfrom, vaddr);
	flush_dcache_page_asm(__pa(vto), vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */