/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
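	/*
	 * Skip the write when the register already holds the desired
	 * value; this avoids a (potentially expensive) call into the
	 * platform's secure write hook.
	 */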
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
	else
		l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	l2c_write_sec(aux, base, L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug || outer_cache.write_sec)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

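/*
 * The 0x03 value written around the way operation below sets the DCL
 * and DWB bits of the debug control register (assuming the usual PL310
 * layout: bit 0 DCL, bit 1 DWB), as the 588369 and 727915 errata
 * workarounds require; debug_writel() compiles away to nothing when
 * neither erratum is configured.
 */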
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (it is unspecified whether this causes an error.)  Thankfully,
 * these controllers are not used on SMP systems.
 *
 * The L2C-210 never has a sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this code with the
 * L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

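/*
 * Invalidate the cache lines covering [start, end).  A partial line at
 * either end is cleaned+invalidated instead of just invalidated, so
 * that unrelated data sharing that line is written back rather than
 * discarded.  For example (illustrative addresses), inv_range(0x1004,
 * 0x2008) clean+invalidates the lines at 0x1000 and 0x2000, then
 * invalidates the whole lines in [0x1020, 0x2000).
 */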
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  This controller never uses sync_reg_offset, so we
 * hard-code the sync register offset here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

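/*
 * Process the range in blocks of at most 4096 bytes, waiting for each
 * line operation to complete.  Between blocks the lock is dropped and
 * immediately re-acquired, so that pending interrupts and other CPUs
 * are not locked out for the duration of a large range operation; the
 * (possibly updated) IRQ flags are returned to the caller.
 */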
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static const struct l2c_init_data l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *  __l2c210_cache_sync (using sync_reg_offset)
 *  l2c210_sync
 *  l2c210_inv_range (if 588369 is not applicable)
 *  l2c210_clean_range
 *  l2c210_flush_range (if 588369 is not applicable)
 *  l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is a prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
							L2X0_PREFETCH_CTRL);

	/* From r3p0, there is a power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
							L2X0_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L2X0_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L2X0_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

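	/*
	 * Aurora encodes its associativity differently: the 4-bit field
	 * is decoded below to a power-of-two way count.
	 */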
	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
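	/*
	 * Worked example (illustrative values): with way_size_0 = 8K and
	 * a way-size field of 3, each way is 8K << 3 = 64K, so a 16-way
	 * L2C-310 reports 16 * 64K = 1MB.  The field occupies bits
	 * [19:17] of the auxiliary control register, hence the shift by
	 * 17 below.
	 */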
	way_size_bits = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (fns.write_sec)
		fns.set_debug = NULL;

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the below registers will
	 * fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

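	/*
	 * Clear the DT-configured fields from aux_val and aux_mask so
	 * that __l2c_init() applies the values computed above instead of
	 * the hardware defaults for those fields.
	 */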
	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range   = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all   = l2c210_flush_all,
		.disable     = l2c_disable,
		.sync        = l2c210_sync,
		.resume      = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range   = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all   = l2c220_flush_all,
		.disable     = l2c_disable,
		.sync        = l2c220_sync,
		.resume      = l2c210_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save  = l2c310_save,
	.outer_cache = {
		.inv_range   = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all   = l2c210_flush_all,
		.disable     = l2c_disable,
		.sync        = l2c210_sync,
		.set_debug   = l2c310_set_debug,
		.resume      = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
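/*
 * For example (illustrative, assuming MAX_RANGE_SIZE is at least two
 * cache lines): a Linux range of [0x1000, 0x1040) covers the two lines
 * at 0x1000 and 0x1020, so the hardware is programmed with an inclusive
 * end of range_end - CACHE_LINE_SIZE = 0x1020, as the aurora_*_range
 * functions below do.
 */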
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up to the
	 * cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
							AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
							AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save  = aurora_save,
	.outer_cache = {
		.inv_range   = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all   = l2x0_flush_all,
		.disable     = l2x0_disable,
		.sync        = l2x0_cache_sync,
		.resume      = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save  = aurora_save,
	.outer_cache = {
		.resume      = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 -
 * 0xBFFFFFFF and 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind that the cross-section case is
 * very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid
 * use case.
 *
 * Note 3:
 * Section 1 can in practice no longer be used on rev A2, so the code
 * does not need to handle section 1 at all.
 */
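/*
 * Worked example of the mapping above (illustrative addresses): a
 * section 2 address such as 0x48000000 is passed to the L2 as
 * 0x48000000 + 0x40000000 = 0x88000000 (SYS EMI), while a section 3
 * address such as 0xC0001000 has 0x80000000 added, which wraps in
 * 32-bit arithmetic to 0x40001000 (VC EMI).
 */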
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s are based on ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.save  = l2c310_save,
	.outer_cache = {
		.inv_range   = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all   = l2c210_flush_all,
		.disable     = l2c_disable,
		.sync        = l2c210_sync,
		.resume      = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save  = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume      = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif