/*
 *  linux/arch/arm/mm/copypage-xsc3.c
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */
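
/*
 * (Putting a number on that: with the usual 4 KiB PAGE_SIZE, one page
 * copy touches 4 KiB of source plus 4 KiB of destination data -- the 8K
 * above, or 256 of this core's 32-byte cache lines. Invalidating each
 * destination line just before it is overwritten, as the loops below do,
 * is what keeps the destination data from being allocated into and
 * left behind in the cache.)
 */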

/*
 * XSC3 optimised copy_user_highpage
 *  r0 = destination
 *  r1 = source
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void __naked
xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	asm("\
	stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				@ %2 = PAGE_SIZE / 64 - 1\n\
						\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
						\n\
2:	ldrd	r2, [r1], #8			@ copy 64 bytes per pass\n\
	mov	ip, r0				\n\
	ldrd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate dest line\n\
	strd	r2, [r0], #8			\n\
	ldrd	r2, [r1], #8			\n\
	strd	r4, [r0], #8			\n\
	ldrd	r4, [r1], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r4, [r0], #8			\n\
	ldrd	r2, [r1], #8			\n\
	mov	ip, r0				\n\
	ldrd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate dest line\n\
	strd	r2, [r0], #8			\n\
	ldrd	r2, [r1], #8			\n\
	subs	lr, lr, #1			\n\
	strd	r4, [r0], #8			\n\
	ldrd	r4, [r1], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r4, [r0], #8			\n\
	bgt	1b				@ more passes: prefetch ahead\n\
	beq	2b				@ final pass: no pld\n\
						\n\
	ldmfd	sp!, {r4, r5, pc}"
	:
	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
}
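
/*
 * A rough C equivalent of the loop above (an illustrative sketch, not
 * code from this file; the helper name is made up). The asm is __naked
 * and relies on the AAPCS having placed kto in r0 and kfrom in r1; each
 * pass moves 64 bytes and invalidates the two destination cache lines
 * before writing them:
 *
 *	static void xsc3_copy_page_sketch(void *kto, const void *kfrom)
 *	{
 *		u64 *to = kto;
 *		const u64 *from = kfrom;
 *		int i, j;
 *
 *		for (i = 0; i < PAGE_SIZE / 64; i++) {
 *			// pld: prefetch source 64-96 bytes ahead
 *			// mcr c7, c6, 1: invalidate dest lines at to, to + 4
 *			for (j = 0; j < 8; j++)
 *				*to++ = *from++;	// ldrd/strd, 8 bytes each
 *		}
 *	}
 */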

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
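
/*
 * Note the unmap order above: kmap_atomic() mappings are stacked, so
 * they must be released last-in, first-out -- kfrom was mapped last and
 * is therefore unmapped first. The same pattern in miniature (p1 and p2
 * are placeholder pages):
 *
 *	void *a = kmap_atomic(p1);
 *	void *b = kmap_atomic(p2);
 *	// ... use a and b ...
 *	kunmap_atomic(b);
 *	kunmap_atomic(a);
 */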

/*
 * XSC3 optimised clear_user_highpage
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile ("\
	mov	r1, %2				@ %2 = PAGE_SIZE / 32\n\
	mov	r2, #0				\n\
	mov	r3, #0				@ r2/r3 form the zero pair for strd\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	subs	r1, r1, #1			\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
	kunmap_atomic(kaddr);
}
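
/*
 * Rough C equivalent of the loop above (an illustrative sketch, not
 * code from this file; the helper name is made up): one cache-line
 * invalidate per 32 bytes, then four 8-byte zero stores, repeated
 * PAGE_SIZE / 32 times:
 *
 *	static void xsc3_clear_page_sketch(void *kaddr)
 *	{
 *		u64 *p = kaddr;
 *		int i, j;
 *
 *		for (i = 0; i < PAGE_SIZE / 32; i++) {
 *			// mcr c7, c6, 1: invalidate the dest line at p
 *			for (j = 0; j < 4; j++)
 *				*p++ = 0;	// strd: zero 8 bytes
 *		}
 *	}
 */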

struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
};
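
/*
 * For context, a hedged sketch of how this table is consumed (based on
 * the generic ARM support code of this era, not on anything in this
 * file): on a MULTI_USER build the boot code copies the matched
 * processor's entry into the global cpu_user, and the copy/clear
 * wrappers dispatch through it, roughly:
 *
 *	// arch/arm/kernel/setup.c, simplified
 *	#ifdef MULTI_USER
 *	cpu_user = *list->user;
 *	#endif
 *
 *	// arch/arm/include/asm/page.h, simplified
 *	#define __cpu_copy_user_highpage  cpu_user.cpu_copy_user_highpage
 *	#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage
 */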