/*
 *  linux/arch/arm/mm/copypage-xsc3.c
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

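/*
 * Hence the loops below invalidate each 32-byte destination cache line
 * (mcr p15, 0, Rd, c7, c6, 1) immediately before storing to it: any
 * stale destination data is discarded, and the stores can drain through
 * the write buffer instead of pulling the whole destination page into
 * the cache.
 */
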
/*
 * XSC3 optimised copy_user_highpage
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
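/*
 * The assembly below moves the page in 64-byte chunks, invalidating
 * each 32-byte destination cache line just before it is written.  In
 * rough, illustrative C (invalidate_line() is a hypothetical stand-in
 * for the "mcr p15, 0, Rd, c7, c6, 1" D-cache line invalidate):
 *
 *	for (int i = 0; i < PAGE_SIZE / 64; i++) {
 *		invalidate_line(kto);
 *		invalidate_line(kto + 32);
 *		memcpy(kto, kfrom, 64);
 *		kto += 64;
 *		kfrom += 64;
 *	}
 *
 * The loop counter starts at PAGE_SIZE / 64 - 1; the last chunk takes
 * the "beq 2b" path so that no pld is issued past the end of the
 * source page.
 */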
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

	asm volatile ("\
	pld	[%1, #0]			\n\
	pld	[%1, #32]			\n\
1:	pld	[%1, #64]			\n\
	pld	[%1, #96]			\n\
						\n\
2:	ldrd	r2, r3, [%1], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	subs	%2, %2, #1			\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	bgt	1b				\n\
	beq	2b				"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5");
}

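/*
 * Map both pages with kmap_atomic() and hand the kernel addresses to
 * the assembly loop above.  Note that atomic kmaps are stacked, so the
 * kunmap_atomic() calls below come in reverse (LIFO) order.
 */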
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
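	/*
	 * The user mapping of the source page may hold dirty lines in
	 * the (virtually indexed/tagged) cache; flush that alias first
	 * so the copy reads up-to-date data through the kernel mapping.
	 */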
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * XSC3 optimised clear_user_highpage
 */
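/*
 * Same pattern as the copy loop, per 32-byte line: invalidate it, then
 * store zeros.  Illustrative C again (invalidate_line() hypothetical):
 *
 *	for (int i = 0; i < PAGE_SIZE / 32; i++) {
 *		invalidate_line(ptr);
 *		memset(ptr, 0, 32);
 *		ptr += 32;
 *	}
 */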
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile ("\
	mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	subs	r1, r1, #1			\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
	kunmap_atomic(kaddr);
}
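/*
 * These hooks are not called directly; on a multi-CPU-type build this
 * struct is (presumably via the XSC3 proc_info entry) copied into the
 * global cpu_user ops during boot, which is why __initdata is safe
 * here.
 */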
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
};