/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

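/*
 * Page protection for the temporary source mapping: present and young,
 * with the memory type that routes accesses through the mini data cache.
 */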
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

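/* Serialises use of the single COPYPAGE_MINICACHE window set up below. */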
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
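/*
 * For orientation only: a rough plain-C sketch of what the hand-tuned
 * assembly below does.  Each iteration copies 64 bytes and then cleans
 * and invalidates the two freshly written 32-byte destination cache
 * lines.  The helper name and the memcpy() stand-in are illustrative;
 * this block is not built.
 */
#if 0
static void mc_copy_user_page_sketch(void *from, void *to)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE / 64; i++) {
		memcpy(to, from, 64);	/* two 32-byte D-cache lines */
		/* clean + invalidate the destination lines just written */
		from += 64;
		to += 64;
	}
}
#endif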
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile ("\
	pld	[%0, #0]			\n\
	pld	[%0, #32]			\n\
	pld	[%1, #0]			\n\
	pld	[%1, #32]			\n\
1:	pld	[%0, #64]			\n\
	pld	[%0, #96]			\n\
	pld	[%1, #64]			\n\
	pld	[%1, #96]			\n\
2:	ldrd	r2, r3, [%0], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	mov	ip, %1				\n\
	strd	r2, r3, [%1], #8		\n\
	ldrd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	strd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, r3, [%0], #8		\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, r5, [%0], #8		\n\
	mov	ip, %1				\n\
	strd	r2, r3, [%1], #8		\n\
	ldrd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	strd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	%2, %2, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5", "ip");
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

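	/*
	 * If the source page may still be dirty in its kernel mapping,
	 * write it back first so the aliased minicache mapping set up
	 * below sees up-to-date data.
	 */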
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

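	/*
	 * Map the source page at the COPYPAGE_MINICACHE window with the
	 * mini-data-cache attributes, so the reads in mc_copy_user_page()
	 * do not disturb the main D-cache.
	 */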
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
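	/*
	 * Zero the page one 32-byte cache line at a time, cleaning and
	 * invalidating each destination line as it is filled, mirroring
	 * the copy loop above.
	 */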
	asm volatile(
	"mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}

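/*
 * Hooked up at boot via the XScale proc_info entries; the generic
 * copy_user_highpage()/clear_user_highpage() paths then dispatch
 * through this table.
 */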
struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};