/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write-through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
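/*
 * Note: this equals the total data cache size, presumably by design:
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE = 8 * 64 * 32
 * = 16384 bytes (16 KB).
 */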

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wt_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr
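/*
 * Note: the cache is write-through, so no line is ever dirty and
 * "clean and invalidate" reduces to a plain invalidate of the whole
 * I and D caches (c7,c5,0 and c7,c6,0 respectively).
 */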

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
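/*
 * Equivalent C sketch (illustrative only, not built; the helper names
 * merely stand for the c7,c6,1 / c7,c5,1 MCRs above):
 *
 *	if (end - start >= CACHE_DLIMIT)
 *		return __flush_whole_cache();
 *	for (; start < end; start += CACHE_DLINESIZE) {
 *		invalidate_dcache_line(start);
 *		if (flags & VM_EXEC)
 *			invalidate_icache_line(start);
 *	}
 */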

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	ret	lr
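/*
 * Note: only I lines need invalidating here; the write-through D cache
 * is always coherent with memory, so newly written instructions become
 * visible as soon as the stale I lines are dropped.  r0 = 0 is the
 * routine's return value (success).
 */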

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* FALLTHROUGH */
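/*
 * Falls through into v4wt_dma_inv_range below with r0 = addr and
 * r1 = addr + size, which invalidates the D lines in that window.
 */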

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
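/*
 * Note: on a write-through cache no line is ever dirty, so the
 * "partially covered lines must be written back" requirement above
 * is met trivially and plain invalidation is always safe.
 */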

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
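	@ clean is a no-op on a write-through cache, so flush == invalidate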

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_map_area)
	ret	lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
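/*
 * Note: dma_map_area() is a no-op because write-through caching keeps
 * memory up to date, so there is nothing to clean before the device
 * reads it.  dma_unmap_area() invalidates the start..start+size window
 * unless the transfer was DMA_TO_DEVICE, so the CPU rereads data the
 * device wrote (DMA_FROM_DEVICE and DMA_BIDIRECTIONAL).
 */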

	.globl	v4wt_flush_kern_cache_louis
	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
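	@ with only one cache level on ARMv4, flushing to the Level of
	@ Unification (LoUIS) is the same as flushing the whole cache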

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
207