xref: /openbmc/linux/arch/parisc/kernel/cache.c (revision 930beb5a)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
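	/* Equivalence: (1 << (cc_block-1)) * (cc_line << (4 + cc_shift))
	 *	== cc_line << ((cc_block - 1) + 4 + cc_shift)
	 *	== cc_line << (3 + cc_block + cc_shift),
	 * since multiplying by a power of two is just a left shift.
	 */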
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping and then flush
		 * the page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
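		/* Virtual addresses that agree modulo SHMLBA fall in the
		 * same cache congruence class, so flushing one of them
		 * covers them all; a mismatch below would mean an
		 * inequivalent alias, which arch_get_unmapped_area()
		 * is supposed to prevent. */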
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

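/* User ranges smaller than parisc_cache_flush_threshold are flushed line
 * by line; anything larger falls back to flushing the whole cache, which
 * is cheaper at that point.  parisc_setup_cache_timing() below measures
 * both cases at boot and tunes the threshold accordingly. */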
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

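	/* mfctl(16) reads CR16, the interval timer, so the deltas
	 * computed below are elapsed cycles. */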
	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
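	/* Also purge the kernel TLB entry: as long as a translation
	 * exists the CPU may speculatively pull the page back into
	 * the cache (see the coherence comment in flush_dcache_page()). */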
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context.  */

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

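	/* Count whole pages: the start is rounded down and the end
	 * rounded up, so partially covered pages are purged as well. */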
	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		purge_tlb_start(flags);
		mtsp(sid, 1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

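/* Walk the page table for addr and return a pointer to the PTE, or NULL
 * if some level of the table is not populated.  Callers still have to
 * check that the PTE maps a valid pfn. */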
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

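	/* mm->context holds the space id; if it matches %sr3, this mm is
	 * the address space currently live on this CPU, so the user-range
	 * flush routines can be used on it directly. */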
	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *vto;
	unsigned long flags;

	/* Clear using TMPALIAS region.  The page doesn't need to
	   be flushed but the kernel mapping needs to be purged.  */

	vto = kmap_atomic(page);

	/* The PA-RISC 2.0 Architecture book states on page F-6:
	   "Before a write-capable translation is enabled, *all*
	   non-equivalently-aliased translations must be removed
	   from the page table and purged from the TLB.  (Note
	   that the caches are not required to be flushed at this
	   time.)  Before any non-equivalent aliased translation
	   is re-enabled, the virtual address range for the writeable
	   page (the entire page) must be flushed from the cache,
	   and the write-capable translation removed from the page
	   table and purged from the TLB."  */

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	purge_tlb_end(flags);
	preempt_disable();
	clear_user_page_asm(vto, vaddr);
	preempt_enable();

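	/* Stands in for kunmap_atomic(); the kernel alias was already
	 * purged above, so the extra flush kunmap_atomic() would do on
	 * parisc is presumably unnecessary here. */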
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	unsigned long flags;

	/* Copy using TMPALIAS region.  This has the advantage
	   that the `from' page doesn't need to be flushed.  However,
	   the `to' page must be flushed in copy_user_page_asm since
	   it can be used to bring in executable code.  */

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	pdtlb_kernel(vfrom);
	purge_tlb_end(flags);
	preempt_disable();
	copy_user_page_asm(vto, vfrom, vaddr);
	flush_dcache_page_asm(__pa(vto), vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */