xref: /openbmc/linux/arch/arm/mm/cache-l2x0.c (revision 9a6655e4)
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		;
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

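/* Issue a Cache Sync operation and wait for it to complete */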
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

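/* Clean (write back) a single cache line, identified by physical address */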
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

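/* Invalidate a single cache line, identified by physical address */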
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

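/*
 * PL310 erratum 588369: a Clean & Invalidate by PA operation may leave
 * clean lines valid.  Work around this by issuing a Clean by PA followed
 * by an Invalidate by PA, with write-back and cache linefill disabled
 * through the Debug Control Register (via the secure monitor call below)
 * around the affected range operations.
 */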
#ifdef CONFIG_PL310_ERRATA_588369
static void debug_writel(unsigned long val)
{
	extern void omap_smc1(u32 fn, u32 arg);

	/*
	 * Texas Instruments secure monitor API to modify the
	 * PL310 Debug Control Register.
	 */
	omap_smc1(0x100, val);
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

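/* Clean and invalidate a single cache line with one Clean & Invalidate by PA operation */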
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

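/* outer_cache.sync hook: perform a cache sync under the l2x0 lock */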
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

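/*
 * Invalidate a physical address range.  Partial lines at either end are
 * flushed (clean+invalidate) rather than invalidated, so that data outside
 * the requested range is not lost.  The range is processed in blocks of at
 * most 4K, dropping and re-taking the lock between blocks to bound
 * interrupt latency.
 */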
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

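/*
 * Clean (write back) a physical address range, one line at a time,
 * again releasing the lock every 4K to bound interrupt latency.
 */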
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

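/*
 * Clean and invalidate a physical address range, wrapping each 4K block
 * in the debug-register sequence where the erratum workaround is enabled.
 */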
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

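/*
 * Probe the cache controller, apply the requested auxiliary control
 * settings, invalidate and enable the cache if it is not already running,
 * and register the outer cache operations.
 */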
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If we are booting in non-secure mode, accessing
	 * the registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
			 ways, cache_id, aux);
}