xref: /openbmc/linux/arch/arm/mm/copypage-v4wt.c (revision d73e60b7144a86baf0fdfcc9537a70bb4f72e11c)
1*d73e60b7SRussell King /*
2*d73e60b7SRussell King  *  linux/arch/arm/mm/copypage-v4wt.c
3*d73e60b7SRussell King  *
4*d73e60b7SRussell King  *  Copyright (C) 1995-1999 Russell King
5*d73e60b7SRussell King  *
6*d73e60b7SRussell King  * This program is free software; you can redistribute it and/or modify
7*d73e60b7SRussell King  * it under the terms of the GNU General Public License version 2 as
8*d73e60b7SRussell King  * published by the Free Software Foundation.
9*d73e60b7SRussell King  *
10*d73e60b7SRussell King  *  This is for CPUs with a writethrough cache and 'flush ID cache' is
11*d73e60b7SRussell King  *  the only supported cache operation.
12*d73e60b7SRussell King  */
13*d73e60b7SRussell King #include <linux/init.h>
14*d73e60b7SRussell King 
15*d73e60b7SRussell King #include <asm/page.h>
16*d73e60b7SRussell King 
17*d73e60b7SRussell King /*
18*d73e60b7SRussell King  * ARMv4 optimised copy_user_page
19*d73e60b7SRussell King  *
20*d73e60b7SRussell King  * Since we have writethrough caches, we don't have to worry about
21*d73e60b7SRussell King  * dirty data in the cache.  However, we do have to ensure that
22*d73e60b7SRussell King  * subsequent reads are up to date.
23*d73e60b7SRussell King  */
24*d73e60b7SRussell King void __attribute__((naked))
25*d73e60b7SRussell King v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
26*d73e60b7SRussell King {
27*d73e60b7SRussell King 	asm("\
28*d73e60b7SRussell King 	stmfd	sp!, {r4, lr}			@ 2\n\
29*d73e60b7SRussell King 	mov	r2, %0				@ 1\n\
30*d73e60b7SRussell King 	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
31*d73e60b7SRussell King 1:	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
32*d73e60b7SRussell King 	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
33*d73e60b7SRussell King 	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
34*d73e60b7SRussell King 	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
35*d73e60b7SRussell King 	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
36*d73e60b7SRussell King 	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
37*d73e60b7SRussell King 	subs	r2, r2, #1			@ 1\n\
38*d73e60b7SRussell King 	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
39*d73e60b7SRussell King 	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
40*d73e60b7SRussell King 	bne	1b				@ 1\n\
41*d73e60b7SRussell King 	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
42*d73e60b7SRussell King 	ldmfd	sp!, {r4, pc}			@ 3"
43*d73e60b7SRussell King 	:
44*d73e60b7SRussell King 	: "I" (PAGE_SIZE / 64));
45*d73e60b7SRussell King }
46*d73e60b7SRussell King 
47*d73e60b7SRussell King /*
48*d73e60b7SRussell King  * ARMv4 optimised clear_user_page
49*d73e60b7SRussell King  *
50*d73e60b7SRussell King  * Same story as above.
51*d73e60b7SRussell King  */
52*d73e60b7SRussell King void __attribute__((naked))
53*d73e60b7SRussell King v4wt_clear_user_page(void *kaddr, unsigned long vaddr)
54*d73e60b7SRussell King {
55*d73e60b7SRussell King 	asm("\
56*d73e60b7SRussell King 	str	lr, [sp, #-4]!\n\
57*d73e60b7SRussell King 	mov	r1, %0				@ 1\n\
58*d73e60b7SRussell King 	mov	r2, #0				@ 1\n\
59*d73e60b7SRussell King 	mov	r3, #0				@ 1\n\
60*d73e60b7SRussell King 	mov	ip, #0				@ 1\n\
61*d73e60b7SRussell King 	mov	lr, #0				@ 1\n\
62*d73e60b7SRussell King 1:	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
63*d73e60b7SRussell King 	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
64*d73e60b7SRussell King 	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
65*d73e60b7SRussell King 	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
66*d73e60b7SRussell King 	subs	r1, r1, #1			@ 1\n\
67*d73e60b7SRussell King 	bne	1b				@ 1\n\
68*d73e60b7SRussell King 	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
69*d73e60b7SRussell King 	ldr	pc, [sp], #4"
70*d73e60b7SRussell King 	:
71*d73e60b7SRussell King 	: "I" (PAGE_SIZE / 64));
72*d73e60b7SRussell King }
73*d73e60b7SRussell King 
74*d73e60b7SRussell King struct cpu_user_fns v4wt_user_fns __initdata = {
75*d73e60b7SRussell King 	.cpu_clear_user_page	= v4wt_clear_user_page,
76*d73e60b7SRussell King 	.cpu_copy_user_page	= v4wt_copy_user_page,
77*d73e60b7SRussell King };
78