xref: /openbmc/linux/arch/arm/mm/copypage-v4wb.c (revision 1a59d1b8)
/*
 *  linux/arch/arm/mm/copypage-v4wb.c
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
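/*
 * Each pass of the loop below moves 64 bytes through r3, r4, ip and lr,
 * hence the PAGE_SIZE / 64 iteration count.  The destination line is
 * invalidated once per 32 bytes of stores, matching the 32-byte D-cache
 * lines of the cores that use this routine.  The conditional ldmiane
 * skips the final load once the counter hits zero, so the loop never
 * reads past the end of the source page.
 */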
static void v4wb_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

	asm volatile ("\
	.syntax unified\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmiane	%1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, %1, c7, c10, 4		@ 1   drain WB"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r3", "r4", "ip", "lr");
}

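/*
 * flush_cache_page() cleans the source page's user-space alias first:
 * with a virtually indexed, write-back cache, dirty data may still sit
 * in the cache under the user mapping and would not be visible through
 * the kernel mapping that the copy reads from.
 */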
void v4wb_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	v4wb_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
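/*
 * One loop pass zeroes 64 bytes from the four cleared registers; as in
 * the copy routine, each 32-byte destination line is invalidated first
 * so no stale cached copy survives, and the zero stores stream out
 * through the write buffer.
 */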
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

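/*
 * These function pointers are copied into the live cpu_user structure
 * during processor setup (the per-CPU proc-info glue points here), so
 * the table itself can live in __initdata and be discarded after boot.
 */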
struct cpu_user_fns v4wb_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
};