/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size  Clean (ticks) Dirty (ticks)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
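/*
 * With the CACHE_DSIZE values above this works out to 64KB on the
 * SA110 and 32KB on the SA1100; ranges of at least this size are
 * handled by cleaning the whole cache instead of individual lines.
 */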

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
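/*
 * These cores have no "clean entire D cache" operation, so the D cache
 * is cleaned by reading one word per cache line from the reserved
 * cacheable mapping at FLUSH_BASE; the resulting line fills displace,
 * and therefore write back, every dirty line.
 */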
__flush_whole_cache:
	mov	r0, #FLUSH_BASE
	add	r1, r0, #CACHE_DSIZE
1:	ldr	r2, [r0], #32
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- page	- page aligned address
 */
ENTRY(v4wb_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
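	@ r0/r1 now span the page; the range code below cleans and
	@ invalidates it in the D cache and then invalidates the I cache.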
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_dma_inv_range)
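	@ A partially covered line at either end may also hold data
	@ outside the range, so write it back before invalidating.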
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_dma_clean_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

	__INITDATA

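/*
 * The order of the entries below must match struct cpu_cache_fns
 * (see asm/cacheflush.h), which is how the kernel binds the generic
 * cache operations to these v4wb routines.
 */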
	.type	v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
	.long	v4wb_flush_kern_cache_all
	.long	v4wb_flush_user_cache_all
	.long	v4wb_flush_user_cache_range
	.long	v4wb_coherent_kern_range
	.long	v4wb_coherent_user_range
	.long	v4wb_flush_kern_dcache_page
	.long	v4wb_dma_inv_range
	.long	v4wb_dma_clean_range
	.long	v4wb_dma_flush_range
	.size	v4wb_cache_fns, . - v4wb_cache_fns