1 /*
2  * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
3  *
4  * Copyright (C) 2007 ARM Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18  */
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/spinlock.h>
22 #include <linux/io.h>
23 #include <linux/of.h>
24 #include <linux/of_address.h>
25 
26 #include <asm/cacheflush.h>
27 #include <asm/hardware/cache-l2x0.h>
28 #include "cache-tauros3.h"
29 #include "cache-aurora-l2.h"
30 
31 struct l2c_init_data {
32 	unsigned num_lock;
33 	void (*of_parse)(const struct device_node *, u32 *, u32 *);
34 	void (*enable)(void __iomem *, u32, unsigned);
35 	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
36 	void (*save)(void __iomem *);
37 	struct outer_cache_fns outer_cache;
38 };
39 
40 #define CACHE_LINE_SIZE		32
41 
42 static void __iomem *l2x0_base;
43 static DEFINE_RAW_SPINLOCK(l2x0_lock);
44 static u32 l2x0_way_mask;	/* Bitmask of active ways */
45 static u32 l2x0_size;
46 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
47 
48 struct l2x0_regs l2x0_saved_regs;
49 
50 /*
51  * Common code for all cache controllers.
52  */
53 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
54 {
55 	/* wait for cache operation by line or way to complete */
56 	while (readl_relaxed(reg) & mask)
57 		cpu_relax();
58 }
59 
60 /*
61  * This should only be called when we have a requirement that the
62  * register be written due to a work-around, as platforms running
63  * in non-secure mode may not be able to access this register.
64  */
65 static inline void l2c_set_debug(void __iomem *base, unsigned long val)
66 {
67 	outer_cache.set_debug(val);
68 }
69 
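/* Operate on all active ways and wait for the operation to complete */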
70 static void __l2c_op_way(void __iomem *reg)
71 {
72 	writel_relaxed(l2x0_way_mask, reg);
73 	l2c_wait_mask(reg, l2x0_way_mask);
74 }
75 
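/* Clear the D and I lockdown registers so that all ways can allocate */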
76 static inline void l2c_unlock(void __iomem *base, unsigned num)
77 {
78 	unsigned i;
79 
80 	for (i = 0; i < num; i++) {
81 		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
82 			       i * L2X0_LOCKDOWN_STRIDE);
83 		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
84 			       i * L2X0_LOCKDOWN_STRIDE);
85 	}
86 }
87 
88 /*
89  * Enable the L2 cache controller.  This function must only be
90  * called when the cache controller is known to be disabled.
91  */
92 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
93 {
94 	unsigned long flags;
95 
96 	/* Only write the aux register if it needs changing */
97 	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
98 		writel_relaxed(aux, base + L2X0_AUX_CTRL);
99 
100 	l2c_unlock(base, num_lock);
101 
102 	local_irq_save(flags);
103 	__l2c_op_way(base + L2X0_INV_WAY);
104 	writel_relaxed(0, base + sync_reg_offset);
105 	l2c_wait_mask(base + sync_reg_offset, 1);
106 	local_irq_restore(flags);
107 
108 	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
109 }
110 
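/* Flush all dirty data out of the L2 and then turn the controller off */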
111 static void l2c_disable(void)
112 {
113 	void __iomem *base = l2x0_base;
114 
115 	outer_cache.flush_all();
116 	writel_relaxed(0, base + L2X0_CTRL);
117 	dsb(st);
118 }
119 
120 #ifdef CONFIG_CACHE_PL310
121 static inline void cache_wait(void __iomem *reg, unsigned long mask)
122 {
123 	/* cache operations by line are atomic on PL310 */
124 }
125 #else
126 #define cache_wait	l2c_wait_mask
127 #endif
128 
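/* Issue a cache sync and wait for it to complete (a no-op wait on PL310) */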
129 static inline void cache_sync(void)
130 {
131 	void __iomem *base = l2x0_base;
132 
133 	writel_relaxed(0, base + sync_reg_offset);
134 	cache_wait(base + L2X0_CACHE_SYNC, 1);
135 }
136 
137 static inline void l2x0_clean_line(unsigned long addr)
138 {
139 	void __iomem *base = l2x0_base;
140 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
141 	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
142 }
143 
144 static inline void l2x0_inv_line(unsigned long addr)
145 {
146 	void __iomem *base = l2x0_base;
147 	cache_wait(base + L2X0_INV_LINE_PA, 1);
148 	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
149 }
150 
151 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
152 static inline void debug_writel(unsigned long val)
153 {
154 	if (outer_cache.set_debug)
155 		l2c_set_debug(l2x0_base, val);
156 }
157 #else
158 /* Optimised out for non-errata case */
159 static inline void debug_writel(unsigned long val)
160 {
161 }
162 #endif
163 
164 #ifdef CONFIG_PL310_ERRATA_588369
165 static inline void l2x0_flush_line(unsigned long addr)
166 {
167 	void __iomem *base = l2x0_base;
168 
169 	/* Clean by PA followed by Invalidate by PA */
170 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
171 	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
172 	cache_wait(base + L2X0_INV_LINE_PA, 1);
173 	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
174 }
175 #else
176 
177 static inline void l2x0_flush_line(unsigned long addr)
178 {
179 	void __iomem *base = l2x0_base;
180 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
181 	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
182 }
183 #endif
184 
185 static void l2x0_cache_sync(void)
186 {
187 	unsigned long flags;
188 
189 	raw_spin_lock_irqsave(&l2x0_lock, flags);
190 	cache_sync();
191 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
192 }
193 
194 static void __l2x0_flush_all(void)
195 {
196 	debug_writel(0x03);
197 	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
198 	cache_sync();
199 	debug_writel(0x00);
200 }
201 
202 static void l2x0_flush_all(void)
203 {
204 	unsigned long flags;
205 
206 	/* clean all ways */
207 	raw_spin_lock_irqsave(&l2x0_lock, flags);
208 	__l2x0_flush_all();
209 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
210 }
211 
212 static void l2x0_clean_all(void)
213 {
214 	unsigned long flags;
215 
216 	/* clean all ways */
217 	raw_spin_lock_irqsave(&l2x0_lock, flags);
218 	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
219 	cache_sync();
220 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
221 }
222 
223 static void l2x0_inv_all(void)
224 {
225 	unsigned long flags;
226 
227 	/* invalidate all ways */
228 	raw_spin_lock_irqsave(&l2x0_lock, flags);
229 	/* Invalidating when the L2 is enabled is a no-no */
230 	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
231 	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
232 	cache_sync();
233 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
234 }
235 
236 static void l2x0_inv_range(unsigned long start, unsigned long end)
237 {
238 	void __iomem *base = l2x0_base;
239 	unsigned long flags;
240 
241 	raw_spin_lock_irqsave(&l2x0_lock, flags);
242 	if (start & (CACHE_LINE_SIZE - 1)) {
243 		start &= ~(CACHE_LINE_SIZE - 1);
244 		debug_writel(0x03);
245 		l2x0_flush_line(start);
246 		debug_writel(0x00);
247 		start += CACHE_LINE_SIZE;
248 	}
249 
250 	if (end & (CACHE_LINE_SIZE - 1)) {
251 		end &= ~(CACHE_LINE_SIZE - 1);
252 		debug_writel(0x03);
253 		l2x0_flush_line(end);
254 		debug_writel(0x00);
255 	}
256 
257 	while (start < end) {
258 		unsigned long blk_end = start + min(end - start, 4096UL);
259 
260 		while (start < blk_end) {
261 			l2x0_inv_line(start);
262 			start += CACHE_LINE_SIZE;
263 		}
264 
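		/* drop the lock between 4K blocks to limit IRQ-disabled latency */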
265 		if (blk_end < end) {
266 			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
267 			raw_spin_lock_irqsave(&l2x0_lock, flags);
268 		}
269 	}
270 	cache_wait(base + L2X0_INV_LINE_PA, 1);
271 	cache_sync();
272 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
273 }
274 
275 static void l2x0_clean_range(unsigned long start, unsigned long end)
276 {
277 	void __iomem *base = l2x0_base;
278 	unsigned long flags;
279 
280 	if ((end - start) >= l2x0_size) {
281 		l2x0_clean_all();
282 		return;
283 	}
284 
285 	raw_spin_lock_irqsave(&l2x0_lock, flags);
286 	start &= ~(CACHE_LINE_SIZE - 1);
287 	while (start < end) {
288 		unsigned long blk_end = start + min(end - start, 4096UL);
289 
290 		while (start < blk_end) {
291 			l2x0_clean_line(start);
292 			start += CACHE_LINE_SIZE;
293 		}
294 
295 		if (blk_end < end) {
296 			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
297 			raw_spin_lock_irqsave(&l2x0_lock, flags);
298 		}
299 	}
300 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
301 	cache_sync();
302 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
303 }
304 
305 static void l2x0_flush_range(unsigned long start, unsigned long end)
306 {
307 	void __iomem *base = l2x0_base;
308 	unsigned long flags;
309 
310 	if ((end - start) >= l2x0_size) {
311 		l2x0_flush_all();
312 		return;
313 	}
314 
315 	raw_spin_lock_irqsave(&l2x0_lock, flags);
316 	start &= ~(CACHE_LINE_SIZE - 1);
317 	while (start < end) {
318 		unsigned long blk_end = start + min(end - start, 4096UL);
319 
320 		debug_writel(0x03);
321 		while (start < blk_end) {
322 			l2x0_flush_line(start);
323 			start += CACHE_LINE_SIZE;
324 		}
325 		debug_writel(0x00);
326 
327 		if (blk_end < end) {
328 			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
329 			raw_spin_lock_irqsave(&l2x0_lock, flags);
330 		}
331 	}
332 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
333 	cache_sync();
334 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
335 }
336 
337 static void l2x0_disable(void)
338 {
339 	unsigned long flags;
340 
341 	raw_spin_lock_irqsave(&l2x0_lock, flags);
342 	__l2x0_flush_all();
343 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
344 	dsb(st);
345 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
346 }
347 
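/*
 * Legacy enable used by l2x0_init_fns: the num_lock argument is ignored
 * and re-derived from the cache controller part ID.
 */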
348 static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
349 {
350 	unsigned id;
351 
352 	id = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
353 	if (id == L2X0_CACHE_ID_PART_L310)
354 		num_lock = 8;
355 	else
356 		num_lock = 1;
357 
358 	/* l2x0 controller is disabled */
359 	writel_relaxed(aux, base + L2X0_AUX_CTRL);
360 
361 	/* Make sure that I&D is not locked down when starting */
362 	l2c_unlock(base, num_lock);
363 
364 	l2x0_inv_all();
365 
366 	/* enable L2X0 */
367 	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
368 }
369 
370 static void l2x0_resume(void)
371 {
372 	void __iomem *base = l2x0_base;
373 
374 	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
375 		l2x0_enable(base, l2x0_saved_regs.aux_ctrl, 0);
376 }
377 
378 static const struct l2c_init_data l2x0_init_fns __initconst = {
379 	.enable = l2x0_enable,
380 	.outer_cache = {
381 		.inv_range = l2x0_inv_range,
382 		.clean_range = l2x0_clean_range,
383 		.flush_range = l2x0_flush_range,
384 		.flush_all = l2x0_flush_all,
385 		.disable = l2x0_disable,
386 		.sync = l2x0_cache_sync,
387 		.resume = l2x0_resume,
388 	},
389 };
390 
391 /*
392  * L2C-210 specific code.
393  *
394  * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
395  * ensure that no background operation is running.  The way operations
396  * are all background tasks.
397  *
398  * While a background operation is in progress, any new operation is
399  * ignored (unspecified whether this causes an error.)  Thankfully, not
400  * used on SMP.
401  *
402  * The sync register is always L2X0_CACHE_SYNC, but we use sync_reg_offset
403  * here so we can share some of this code with L2C-310.
404  */
405 static void __l2c210_cache_sync(void __iomem *base)
406 {
407 	writel_relaxed(0, base + sync_reg_offset);
408 }
409 
410 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
411 	unsigned long end)
412 {
413 	while (start < end) {
414 		writel_relaxed(start, reg);
415 		start += CACHE_LINE_SIZE;
416 	}
417 }
418 
419 static void l2c210_inv_range(unsigned long start, unsigned long end)
420 {
421 	void __iomem *base = l2x0_base;
422 
423 	if (start & (CACHE_LINE_SIZE - 1)) {
424 		start &= ~(CACHE_LINE_SIZE - 1);
425 		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
426 		start += CACHE_LINE_SIZE;
427 	}
428 
429 	if (end & (CACHE_LINE_SIZE - 1)) {
430 		end &= ~(CACHE_LINE_SIZE - 1);
431 		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
432 	}
433 
434 	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
435 	__l2c210_cache_sync(base);
436 }
437 
438 static void l2c210_clean_range(unsigned long start, unsigned long end)
439 {
440 	void __iomem *base = l2x0_base;
441 
442 	start &= ~(CACHE_LINE_SIZE - 1);
443 	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
444 	__l2c210_cache_sync(base);
445 }
446 
447 static void l2c210_flush_range(unsigned long start, unsigned long end)
448 {
449 	void __iomem *base = l2x0_base;
450 
451 	start &= ~(CACHE_LINE_SIZE - 1);
452 	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
453 	__l2c210_cache_sync(base);
454 }
455 
456 static void l2c210_flush_all(void)
457 {
458 	void __iomem *base = l2x0_base;
459 
460 	BUG_ON(!irqs_disabled());
461 
462 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
463 	__l2c210_cache_sync(base);
464 }
465 
466 static void l2c210_sync(void)
467 {
468 	__l2c210_cache_sync(l2x0_base);
469 }
470 
471 static void l2c210_resume(void)
472 {
473 	void __iomem *base = l2x0_base;
474 
475 	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
476 		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
477 }
478 
479 static const struct l2c_init_data l2c210_data __initconst = {
480 	.num_lock = 1,
481 	.enable = l2c_enable,
482 	.outer_cache = {
483 		.inv_range = l2c210_inv_range,
484 		.clean_range = l2c210_clean_range,
485 		.flush_range = l2c210_flush_range,
486 		.flush_all = l2c210_flush_all,
487 		.disable = l2c_disable,
488 		.sync = l2c210_sync,
489 		.resume = l2c210_resume,
490 	},
491 };
492 
493 /*
494  * L2C-220 specific code.
495  *
496  * All operations are background operations: they have to be waited for.
497  * Conflicting requests generate a slave error (which will cause an
498  * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
499  * sync register here.
500  *
501  * However, we can re-use the l2c210_resume call.
502  */
503 static inline void __l2c220_cache_sync(void __iomem *base)
504 {
505 	writel_relaxed(0, base + L2X0_CACHE_SYNC);
506 	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
507 }
508 
509 static void l2c220_op_way(void __iomem *base, unsigned reg)
510 {
511 	unsigned long flags;
512 
513 	raw_spin_lock_irqsave(&l2x0_lock, flags);
514 	__l2c_op_way(base + reg);
515 	__l2c220_cache_sync(base);
516 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
517 }
518 
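/*
 * Operate on a physical address range in blocks of up to 4K, waiting for
 * each background line operation to complete.  Called with l2x0_lock held;
 * the lock may be dropped and retaken between blocks, so the possibly
 * updated IRQ flags are returned to the caller.
 */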
519 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
520 	unsigned long end, unsigned long flags)
521 {
522 	raw_spinlock_t *lock = &l2x0_lock;
523 
524 	while (start < end) {
525 		unsigned long blk_end = start + min(end - start, 4096UL);
526 
527 		while (start < blk_end) {
528 			l2c_wait_mask(reg, 1);
529 			writel_relaxed(start, reg);
530 			start += CACHE_LINE_SIZE;
531 		}
532 
533 		if (blk_end < end) {
534 			raw_spin_unlock_irqrestore(lock, flags);
535 			raw_spin_lock_irqsave(lock, flags);
536 		}
537 	}
538 
539 	return flags;
540 }
541 
542 static void l2c220_inv_range(unsigned long start, unsigned long end)
543 {
544 	void __iomem *base = l2x0_base;
545 	unsigned long flags;
546 
547 	raw_spin_lock_irqsave(&l2x0_lock, flags);
548 	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
549 		if (start & (CACHE_LINE_SIZE - 1)) {
550 			start &= ~(CACHE_LINE_SIZE - 1);
551 			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
552 			start += CACHE_LINE_SIZE;
553 		}
554 
555 		if (end & (CACHE_LINE_SIZE - 1)) {
556 			end &= ~(CACHE_LINE_SIZE - 1);
557 			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
558 			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
559 		}
560 	}
561 
562 	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
563 				   start, end, flags);
564 	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
565 	__l2c220_cache_sync(base);
566 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
567 }
568 
569 static void l2c220_clean_range(unsigned long start, unsigned long end)
570 {
571 	void __iomem *base = l2x0_base;
572 	unsigned long flags;
573 
574 	start &= ~(CACHE_LINE_SIZE - 1);
575 	if ((end - start) >= l2x0_size) {
576 		l2c220_op_way(base, L2X0_CLEAN_WAY);
577 		return;
578 	}
579 
580 	raw_spin_lock_irqsave(&l2x0_lock, flags);
581 	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
582 				   start, end, flags);
583 	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
584 	__l2c220_cache_sync(base);
585 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
586 }
587 
588 static void l2c220_flush_range(unsigned long start, unsigned long end)
589 {
590 	void __iomem *base = l2x0_base;
591 	unsigned long flags;
592 
593 	start &= ~(CACHE_LINE_SIZE - 1);
594 	if ((end - start) >= l2x0_size) {
595 		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
596 		return;
597 	}
598 
599 	raw_spin_lock_irqsave(&l2x0_lock, flags);
600 	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
601 				   start, end, flags);
602 	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
603 	__l2c220_cache_sync(base);
604 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
605 }
606 
607 static void l2c220_flush_all(void)
608 {
609 	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
610 }
611 
612 static void l2c220_sync(void)
613 {
614 	unsigned long flags;
615 
616 	raw_spin_lock_irqsave(&l2x0_lock, flags);
617 	__l2c220_cache_sync(l2x0_base);
618 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
619 }
620 
621 static const struct l2c_init_data l2c220_data = {
622 	.num_lock = 1,
623 	.enable = l2c_enable,
624 	.outer_cache = {
625 		.inv_range = l2c220_inv_range,
626 		.clean_range = l2c220_clean_range,
627 		.flush_range = l2c220_flush_range,
628 		.flush_all = l2c220_flush_all,
629 		.disable = l2c_disable,
630 		.sync = l2c220_sync,
631 		.resume = l2c210_resume,
632 	},
633 };
634 
635 /*
636  * L2C-310 specific code.
637  *
638  * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
639  * and the way operations are all background tasks.  However, issuing an
640  * operation while a background operation is in progress results in a
641  * SLVERR response.  We can reuse:
642  *
643  *  __l2c210_cache_sync (using sync_reg_offset)
644  *  l2c210_sync
645  *  l2c210_inv_range (if 588369 is not applicable)
646  *  l2c210_clean_range
647  *  l2c210_flush_range (if 588369 is not applicable)
648  *  l2c210_flush_all (if 727915 is not applicable)
649  *
650  * Errata:
651  * 588369: PL310 R0P0->R1P0, fixed R2P0.
652  *	Affects: all clean+invalidate operations
653  *	clean and invalidate skips the invalidate step, so we need to issue
654  *	separate operations.  We also require the above debug workaround
655  *	enclosing this code fragment on affected parts.  On unaffected parts,
656  *	we must not use this workaround without the debug register writes
657  *	to avoid exposing a problem similar to 727915.
658  *
659  * 727915: PL310 R2P0->R3P0, fixed R3P1.
660  *	Affects: clean+invalidate by way
661  *	clean and invalidate by way runs in the background, and a store can
662  *	hit the line between the clean operation and invalidate operation,
663  *	resulting in the store being lost.
664  *
665  * 753970: PL310 R3P0, fixed R3P1.
666  *	Affects: sync
667  *	prevents merging writes after the sync operation, until another L2C
668  *	operation is performed (or a number of other conditions.)
669  *
670  * 769419: PL310 R0P0->R3P1, fixed R3P2.
671  *	Affects: store buffer
672  *	store buffer is not automatically drained.
673  */
674 static void l2c310_set_debug(unsigned long val)
675 {
676 	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
677 }
678 
679 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
680 {
681 	void __iomem *base = l2x0_base;
682 
683 	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
684 		unsigned long flags;
685 
686 		/* Erratum 588369 for both clean+invalidate operations */
687 		raw_spin_lock_irqsave(&l2x0_lock, flags);
688 		l2c_set_debug(base, 0x03);
689 
690 		if (start & (CACHE_LINE_SIZE - 1)) {
691 			start &= ~(CACHE_LINE_SIZE - 1);
692 			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
693 			writel_relaxed(start, base + L2X0_INV_LINE_PA);
694 			start += CACHE_LINE_SIZE;
695 		}
696 
697 		if (end & (CACHE_LINE_SIZE - 1)) {
698 			end &= ~(CACHE_LINE_SIZE - 1);
699 			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
700 			writel_relaxed(end, base + L2X0_INV_LINE_PA);
701 		}
702 
703 		l2c_set_debug(base, 0x00);
704 		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
705 	}
706 
707 	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
708 	__l2c210_cache_sync(base);
709 }
710 
711 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
712 {
713 	raw_spinlock_t *lock = &l2x0_lock;
714 	unsigned long flags;
715 	void __iomem *base = l2x0_base;
716 
717 	raw_spin_lock_irqsave(lock, flags);
718 	while (start < end) {
719 		unsigned long blk_end = start + min(end - start, 4096UL);
720 
721 		l2c_set_debug(base, 0x03);
722 		while (start < blk_end) {
723 			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
724 			writel_relaxed(start, base + L2X0_INV_LINE_PA);
725 			start += CACHE_LINE_SIZE;
726 		}
727 		l2c_set_debug(base, 0x00);
728 
729 		if (blk_end < end) {
730 			raw_spin_unlock_irqrestore(lock, flags);
731 			raw_spin_lock_irqsave(lock, flags);
732 		}
733 	}
734 	raw_spin_unlock_irqrestore(lock, flags);
735 	__l2c210_cache_sync(base);
736 }
737 
738 static void l2c310_flush_all_erratum(void)
739 {
740 	void __iomem *base = l2x0_base;
741 	unsigned long flags;
742 
743 	raw_spin_lock_irqsave(&l2x0_lock, flags);
744 	l2c_set_debug(base, 0x03);
745 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
746 	l2c_set_debug(base, 0x00);
747 	__l2c210_cache_sync(base);
748 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
749 }
750 
751 static void __init l2c310_save(void __iomem *base)
752 {
753 	unsigned revision;
754 
755 	l2x0_saved_regs.tag_latency = readl_relaxed(base +
756 		L2X0_TAG_LATENCY_CTRL);
757 	l2x0_saved_regs.data_latency = readl_relaxed(base +
758 		L2X0_DATA_LATENCY_CTRL);
759 	l2x0_saved_regs.filter_end = readl_relaxed(base +
760 		L2X0_ADDR_FILTER_END);
761 	l2x0_saved_regs.filter_start = readl_relaxed(base +
762 		L2X0_ADDR_FILTER_START);
763 
764 	revision = readl_relaxed(base + L2X0_CACHE_ID) &
765 			L2X0_CACHE_ID_RTL_MASK;
766 
767 	/* From r2p0, there is a Prefetch offset/control register */
768 	if (revision >= L310_CACHE_ID_RTL_R2P0)
769 		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
770 							L2X0_PREFETCH_CTRL);
771 
772 	/* From r3p0, there is a Power control register */
773 	if (revision >= L310_CACHE_ID_RTL_R3P0)
774 		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
775 							L2X0_POWER_CTRL);
776 }
777 
778 static void l2c310_resume(void)
779 {
780 	void __iomem *base = l2x0_base;
781 
782 	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
783 		unsigned revision;
784 
785 		/* restore pl310 setup */
786 		writel_relaxed(l2x0_saved_regs.tag_latency,
787 			       base + L2X0_TAG_LATENCY_CTRL);
788 		writel_relaxed(l2x0_saved_regs.data_latency,
789 			       base + L2X0_DATA_LATENCY_CTRL);
790 		writel_relaxed(l2x0_saved_regs.filter_end,
791 			       base + L2X0_ADDR_FILTER_END);
792 		writel_relaxed(l2x0_saved_regs.filter_start,
793 			       base + L2X0_ADDR_FILTER_START);
794 
795 		revision = readl_relaxed(base + L2X0_CACHE_ID) &
796 				L2X0_CACHE_ID_RTL_MASK;
797 
798 		if (revision >= L310_CACHE_ID_RTL_R2P0)
799 			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
800 				       base + L2X0_PREFETCH_CTRL);
801 		if (revision >= L310_CACHE_ID_RTL_R3P0)
802 			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
803 				       base + L2X0_POWER_CTRL);
804 
805 		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
806 	}
807 }
808 
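/*
 * Apply the PL310 errata workarounds listed above according to the
 * RTL revision found in the cache ID register.
 */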
809 static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
810 	struct outer_cache_fns *fns)
811 {
812 	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
813 	const char *errata[4];
814 	unsigned n = 0;
815 
816 	/* For compatibility */
817 	if (revision <= L310_CACHE_ID_RTL_R3P0)
818 		fns->set_debug = l2c310_set_debug;
819 
820 	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
821 	    revision < L310_CACHE_ID_RTL_R2P0 &&
822 	    /* For bcm compatibility */
823 	    fns->inv_range == l2c210_inv_range) {
824 		fns->inv_range = l2c310_inv_range_erratum;
825 		fns->flush_range = l2c310_flush_range_erratum;
826 		errata[n++] = "588369";
827 	}
828 
829 	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
830 	    revision >= L310_CACHE_ID_RTL_R2P0 &&
831 	    revision < L310_CACHE_ID_RTL_R3P1) {
832 		fns->flush_all = l2c310_flush_all_erratum;
833 		errata[n++] = "727915";
834 	}
835 
836 	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
837 	    revision == L310_CACHE_ID_RTL_R3P0) {
838 		sync_reg_offset = L2X0_DUMMY_REG;
839 		errata[n++] = "753970";
840 	}
841 
842 	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
843 		errata[n++] = "769419";
844 
845 	if (n) {
846 		unsigned i;
847 
848 		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
849 		for (i = 0; i < n; i++)
850 			pr_cont(" %s", errata[i]);
851 		pr_cont(" enabled\n");
852 	}
853 }
854 
855 static const struct l2c_init_data l2c310_init_fns __initconst = {
856 	.num_lock = 8,
857 	.enable = l2c_enable,
858 	.fixup = l2c310_fixup,
859 	.save = l2c310_save,
860 	.outer_cache = {
861 		.inv_range = l2c210_inv_range,
862 		.clean_range = l2c210_clean_range,
863 		.flush_range = l2c210_flush_range,
864 		.flush_all = l2c210_flush_all,
865 		.disable = l2c_disable,
866 		.sync = l2c210_sync,
867 		.set_debug = l2c310_set_debug,
868 		.resume = l2c310_resume,
869 	},
870 };
871 
872 static void __init __l2c_init(const struct l2c_init_data *data,
873 	u32 aux_val, u32 aux_mask, u32 cache_id)
874 {
875 	struct outer_cache_fns fns;
876 	u32 aux;
877 	u32 way_size = 0;
878 	int ways;
879 	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
880 	const char *type;
881 
882 	/*
883 	 * It is strange to save the register state before initialisation,
884 	 * but hey, this is what the DT implementations decided to do.
885 	 */
886 	if (data->save)
887 		data->save(l2x0_base);
888 
889 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
890 
891 	aux &= aux_mask;
892 	aux |= aux_val;
893 
894 	/* Determine the number of ways */
895 	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
896 	case L2X0_CACHE_ID_PART_L310:
897 		if (aux & (1 << 16))
898 			ways = 16;
899 		else
900 			ways = 8;
901 		type = "L310";
902 		break;
903 
904 	case L2X0_CACHE_ID_PART_L210:
905 		ways = (aux >> 13) & 0xf;
906 		type = "L210";
907 		break;
908 
909 	case AURORA_CACHE_ID:
910 		ways = (aux >> 13) & 0xf;
911 		ways = 2 << ((ways + 1) >> 2);
912 		way_size_shift = AURORA_WAY_SIZE_SHIFT;
913 		type = "Aurora";
914 		break;
915 
916 	default:
917 		/* Assume unknown chips have 8 ways */
918 		ways = 8;
919 		type = "L2x0 series";
920 		break;
921 	}
922 
923 	l2x0_way_mask = (1 << ways) - 1;
924 
925 	/*
926 	 * L2 cache Size =  Way size * Number of ways
927 	 */
928 	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
929 	way_size = 1 << (way_size + way_size_shift);
930 
931 	l2x0_size = ways * way_size * SZ_1K;
932 
933 	fns = data->outer_cache;
934 	if (data->fixup)
935 		data->fixup(l2x0_base, cache_id, &fns);
936 
937 	/*
938 	 * Check if l2x0 controller is already enabled.  If we are booting
939 	 * in non-secure mode accessing the below registers will fault.
940 	 */
941 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
942 		data->enable(l2x0_base, aux, data->num_lock);
943 
944 	/* Re-read it in case some bits are reserved. */
945 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
946 
947 	/* Save the value for resuming. */
948 	l2x0_saved_regs.aux_ctrl = aux;
949 
950 	outer_cache = fns;
951 
952 	pr_info("%s cache controller enabled, %d ways, %d kB\n",
953 		type, ways, l2x0_size >> 10);
954 	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
955 		type, cache_id, aux);
956 }
957 
958 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
959 {
960 	const struct l2c_init_data *data;
961 	u32 cache_id;
962 
963 	l2x0_base = base;
964 
965 	cache_id = readl_relaxed(base + L2X0_CACHE_ID);
966 
967 	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
968 	default:
969 		data = &l2x0_init_fns;
970 		break;
971 
972 	case L2X0_CACHE_ID_PART_L210:
973 		data = &l2c210_data;
974 		break;
975 
976 	case L2X0_CACHE_ID_PART_L220:
977 		data = &l2c220_data;
978 		break;
979 
980 	case L2X0_CACHE_ID_PART_L310:
981 		data = &l2c310_init_fns;
982 		break;
983 	}
984 
985 	__l2c_init(data, aux_val, aux_mask, cache_id);
986 }
987 
988 #ifdef CONFIG_OF
989 static int l2_wt_override;
990 
991 /* Aurora doesn't have the cache ID register available, so we have to
992  * pass it through the device tree */
993 static u32 cache_id_part_number_from_dt;
994 
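/* Translate the DT latency properties into aux control value/mask updates */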
995 static void __init l2x0_of_parse(const struct device_node *np,
996 				 u32 *aux_val, u32 *aux_mask)
997 {
998 	u32 data[2] = { 0, 0 };
999 	u32 tag = 0;
1000 	u32 dirty = 0;
1001 	u32 val = 0, mask = 0;
1002 
1003 	of_property_read_u32(np, "arm,tag-latency", &tag);
1004 	if (tag) {
1005 		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
1006 		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
1007 	}
1008 
1009 	of_property_read_u32_array(np, "arm,data-latency",
1010 				   data, ARRAY_SIZE(data));
1011 	if (data[0] && data[1]) {
1012 		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
1013 			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
1014 		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
1015 		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
1016 	}
1017 
1018 	of_property_read_u32(np, "arm,dirty-latency", &dirty);
1019 	if (dirty) {
1020 		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
1021 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
1022 	}
1023 
1024 	*aux_val &= ~mask;
1025 	*aux_val |= val;
1026 	*aux_mask &= ~mask;
1027 }
1028 
1029 static const struct l2c_init_data of_l2c210_data __initconst = {
1030 	.num_lock = 1,
1031 	.of_parse = l2x0_of_parse,
1032 	.enable = l2c_enable,
1033 	.outer_cache = {
1034 		.inv_range   = l2c210_inv_range,
1035 		.clean_range = l2c210_clean_range,
1036 		.flush_range = l2c210_flush_range,
1037 		.flush_all   = l2c210_flush_all,
1038 		.disable     = l2c_disable,
1039 		.sync        = l2c210_sync,
1040 		.resume      = l2c210_resume,
1041 	},
1042 };
1043 
1044 static const struct l2c_init_data of_l2c220_data __initconst = {
1045 	.num_lock = 1,
1046 	.of_parse = l2x0_of_parse,
1047 	.enable = l2c_enable,
1048 	.outer_cache = {
1049 		.inv_range   = l2c220_inv_range,
1050 		.clean_range = l2c220_clean_range,
1051 		.flush_range = l2c220_flush_range,
1052 		.flush_all   = l2c220_flush_all,
1053 		.disable     = l2c_disable,
1054 		.sync        = l2c220_sync,
1055 		.resume      = l2c210_resume,
1056 	},
1057 };
1058 
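/*
 * Program the tag/data latency and address filter registers directly
 * from the DT properties.
 */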
1059 static void __init l2c310_of_parse(const struct device_node *np,
1060 	u32 *aux_val, u32 *aux_mask)
1061 {
1062 	u32 data[3] = { 0, 0, 0 };
1063 	u32 tag[3] = { 0, 0, 0 };
1064 	u32 filter[2] = { 0, 0 };
1065 
1066 	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
1067 	if (tag[0] && tag[1] && tag[2])
1068 		writel_relaxed(
1069 			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
1070 			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
1071 			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
1072 			l2x0_base + L2X0_TAG_LATENCY_CTRL);
1073 
1074 	of_property_read_u32_array(np, "arm,data-latency",
1075 				   data, ARRAY_SIZE(data));
1076 	if (data[0] && data[1] && data[2])
1077 		writel_relaxed(
1078 			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
1079 			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
1080 			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
1081 			l2x0_base + L2X0_DATA_LATENCY_CTRL);
1082 
1083 	of_property_read_u32_array(np, "arm,filter-ranges",
1084 				   filter, ARRAY_SIZE(filter));
1085 	if (filter[1]) {
1086 		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
1087 			       l2x0_base + L2X0_ADDR_FILTER_END);
1088 		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
1089 			       l2x0_base + L2X0_ADDR_FILTER_START);
1090 	}
1091 }
1092 
1093 static const struct l2c_init_data of_l2c310_data __initconst = {
1094 	.num_lock = 8,
1095 	.of_parse = l2c310_of_parse,
1096 	.enable = l2c_enable,
1097 	.fixup = l2c310_fixup,
1098 	.save  = l2c310_save,
1099 	.outer_cache = {
1100 		.inv_range   = l2c210_inv_range,
1101 		.clean_range = l2c210_clean_range,
1102 		.flush_range = l2c210_flush_range,
1103 		.flush_all   = l2c210_flush_all,
1104 		.disable     = l2c_disable,
1105 		.sync        = l2c210_sync,
1106 		.set_debug   = l2c310_set_debug,
1107 		.resume      = l2c310_resume,
1108 	},
1109 };
1110 
1111 /*
1112  * Note that the end addresses passed to Linux primitives are
1113  * noninclusive, while the hardware cache range operations use
1114  * inclusive start and end addresses.
1115  */
1116 static unsigned long calc_range_end(unsigned long start, unsigned long end)
1117 {
1118 	/*
1119 	 * Limit the number of cache lines processed at once,
1120 	 * since cache range operations stall the CPU pipeline
1121 	 * until completion.
1122 	 */
1123 	if (end > start + MAX_RANGE_SIZE)
1124 		end = start + MAX_RANGE_SIZE;
1125 
1126 	/*
1127 	 * Cache range operations can't straddle a page boundary.
1128 	 */
1129 	if (end > PAGE_ALIGN(start+1))
1130 		end = PAGE_ALIGN(start+1);
1131 
1132 	return end;
1133 }
1134 
1135 /*
1136  * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
1137  * and range operations only do a TLB lookup on the start address.
1138  */
1139 static void aurora_pa_range(unsigned long start, unsigned long end,
1140 			unsigned long offset)
1141 {
1142 	unsigned long flags;
1143 
1144 	raw_spin_lock_irqsave(&l2x0_lock, flags);
1145 	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
1146 	writel_relaxed(end, l2x0_base + offset);
1147 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
1148 
1149 	cache_sync();
1150 }
1151 
1152 static void aurora_inv_range(unsigned long start, unsigned long end)
1153 {
1154 	/*
1155 	 * align start and end addresses to cache line boundaries
1156 	 */
1157 	start &= ~(CACHE_LINE_SIZE - 1);
1158 	end = ALIGN(end, CACHE_LINE_SIZE);
1159 
1160 	/*
1161 	 * Invalidate all full cache lines between 'start' and 'end'.
1162 	 */
1163 	while (start < end) {
1164 		unsigned long range_end = calc_range_end(start, end);
1165 		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
1166 				AURORA_INVAL_RANGE_REG);
1167 		start = range_end;
1168 	}
1169 }
1170 
1171 static void aurora_clean_range(unsigned long start, unsigned long end)
1172 {
1173 	/*
1174 	 * If L2 is forced to WT, the L2 will always be clean and we
1175 	 * don't need to do anything here.
1176 	 */
1177 	if (!l2_wt_override) {
1178 		start &= ~(CACHE_LINE_SIZE - 1);
1179 		end = ALIGN(end, CACHE_LINE_SIZE);
1180 		while (start != end) {
1181 			unsigned long range_end = calc_range_end(start, end);
1182 			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
1183 					AURORA_CLEAN_RANGE_REG);
1184 			start = range_end;
1185 		}
1186 	}
1187 }
1188 
1189 static void aurora_flush_range(unsigned long start, unsigned long end)
1190 {
1191 	start &= ~(CACHE_LINE_SIZE - 1);
1192 	end = ALIGN(end, CACHE_LINE_SIZE);
1193 	while (start != end) {
1194 		unsigned long range_end = calc_range_end(start, end);
1195 		/*
1196 		 * If L2 is forced to WT, the L2 will always be clean and we
1197 		 * just need to invalidate.
1198 		 */
1199 		if (l2_wt_override)
1200 			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
1201 							AURORA_INVAL_RANGE_REG);
1202 		else
1203 			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
1204 							AURORA_FLUSH_RANGE_REG);
1205 		start = range_end;
1206 	}
1207 }
1208 
1209 static void aurora_save(void __iomem *base)
1210 {
1211 	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
1212 	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
1213 }
1214 
1215 static void aurora_resume(void)
1216 {
1217 	void __iomem *base = l2x0_base;
1218 
1219 	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
1220 		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
1221 		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
1222 	}
1223 }
1224 
1225 /*
1226  * For an Aurora cache in no-outer mode, enable broadcasting of cache
1227  * commands to L2 via the CP15 coprocessor.
1228  */
1229 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
1230 	unsigned num_lock)
1231 {
1232 	u32 u;
1233 
1234 	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
1235 	u |= AURORA_CTRL_FW;		/* Set the FW bit */
1236 	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
1237 
1238 	isb();
1239 
1240 	l2c_enable(base, aux, num_lock);
1241 }
1242 
1243 static void __init aurora_fixup(void __iomem *base, u32 cache_id,
1244 	struct outer_cache_fns *fns)
1245 {
1246 	sync_reg_offset = AURORA_SYNC_REG;
1247 }
1248 
1249 static void __init aurora_of_parse(const struct device_node *np,
1250 				u32 *aux_val, u32 *aux_mask)
1251 {
1252 	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
1253 	u32 mask =  AURORA_ACR_REPLACEMENT_MASK;
1254 
1255 	of_property_read_u32(np, "cache-id-part",
1256 			&cache_id_part_number_from_dt);
1257 
1258 	/* Determine and save the write policy */
1259 	l2_wt_override = of_property_read_bool(np, "wt-override");
1260 
1261 	if (l2_wt_override) {
1262 		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
1263 		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
1264 	}
1265 
1266 	*aux_val &= ~mask;
1267 	*aux_val |= val;
1268 	*aux_mask &= ~mask;
1269 }
1270 
1271 static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
1272 	.num_lock = 4,
1273 	.of_parse = aurora_of_parse,
1274 	.enable = l2c_enable,
1275 	.fixup = aurora_fixup,
1276 	.save  = aurora_save,
1277 	.outer_cache = {
1278 		.inv_range   = aurora_inv_range,
1279 		.clean_range = aurora_clean_range,
1280 		.flush_range = aurora_flush_range,
1281 		.flush_all   = l2x0_flush_all,
1282 		.disable     = l2x0_disable,
1283 		.sync        = l2x0_cache_sync,
1284 		.resume      = aurora_resume,
1285 	},
1286 };
1287 
1288 static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
1289 	.num_lock = 4,
1290 	.of_parse = aurora_of_parse,
1291 	.enable = aurora_enable_no_outer,
1292 	.fixup = aurora_fixup,
1293 	.save  = aurora_save,
1294 	.outer_cache = {
1295 		.resume      = aurora_resume,
1296 	},
1297 };
1298 
1299 /*
1300  * For certain Broadcom SoCs, depending on the address range, different offsets
1301  * need to be added to the address before passing it to L2 for
1302  * invalidation/clean/flush
1303  *
1304  * Section Address Range              Offset        EMI
1305  *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
1306  *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
1307  *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
1308  *
1309  * When the start and end addresses span two different sections, we need
1310  * to break the L2 operation into two, each within its own section.
1311  * For example, to invalidate a range that starts at 0xBFFF0000 and ends at
1312  * 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
1313  * 0xC0000000 - 0xC0001000
1314  *
1315  * Note 1:
1316  * By breaking a single L2 operation into two, we may potentially suffer some
1317  * performance hit, but keep in mind that the cross-section case is very rare
1318  *
1319  * Note 2:
1320  * We do not need to handle the case when the start address is in
1321  * Section 1 and the end address is in Section 3, since it is not a valid use
1322  * case
1323  *
1324  * Note 3:
1325  * In practical terms, Section 1 can no longer be used on rev A2, so the
1326  * code does not need to handle section 1 at all.
1327  *
1328  */
1329 #define BCM_SYS_EMI_START_ADDR        0x40000000UL
1330 #define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL
1331 
1332 #define BCM_SYS_EMI_OFFSET            0x40000000UL
1333 #define BCM_VC_EMI_OFFSET             0x80000000UL
1334 
1335 static inline int bcm_addr_is_sys_emi(unsigned long addr)
1336 {
1337 	return (addr >= BCM_SYS_EMI_START_ADDR) &&
1338 		(addr < BCM_VC_EMI_SEC3_START_ADDR);
1339 }
1340 
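/*
 * Apply the per-section offset from the table above before the address
 * is passed to the L2.
 */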
1341 static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
1342 {
1343 	if (bcm_addr_is_sys_emi(addr))
1344 		return addr + BCM_SYS_EMI_OFFSET;
1345 	else
1346 		return addr + BCM_VC_EMI_OFFSET;
1347 }
1348 
1349 static void bcm_inv_range(unsigned long start, unsigned long end)
1350 {
1351 	unsigned long new_start, new_end;
1352 
1353 	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1354 
1355 	if (unlikely(end <= start))
1356 		return;
1357 
1358 	new_start = bcm_l2_phys_addr(start);
1359 	new_end = bcm_l2_phys_addr(end);
1360 
1361 	/* normal case, no cross section between start and end */
1362 	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1363 		l2x0_inv_range(new_start, new_end);
1364 		return;
1365 	}
1366 
1367 	/* They cross sections, so it can only be a crossing from section
1368 	 * 2 to section 3
1369 	 */
1370 	l2x0_inv_range(new_start,
1371 		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1372 	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1373 		new_end);
1374 }
1375 
1376 static void bcm_clean_range(unsigned long start, unsigned long end)
1377 {
1378 	unsigned long new_start, new_end;
1379 
1380 	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1381 
1382 	if (unlikely(end <= start))
1383 		return;
1384 
1385 	if ((end - start) >= l2x0_size) {
1386 		l2x0_clean_all();
1387 		return;
1388 	}
1389 
1390 	new_start = bcm_l2_phys_addr(start);
1391 	new_end = bcm_l2_phys_addr(end);
1392 
1393 	/* normal case, no cross section between start and end */
1394 	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1395 		l2x0_clean_range(new_start, new_end);
1396 		return;
1397 	}
1398 
1399 	/* They cross sections, so it can only be a crossing from section
1400 	 * 2 to section 3
1401 	 */
1402 	l2x0_clean_range(new_start,
1403 		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1404 	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1405 		new_end);
1406 }
1407 
1408 static void bcm_flush_range(unsigned long start, unsigned long end)
1409 {
1410 	unsigned long new_start, new_end;
1411 
1412 	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1413 
1414 	if (unlikely(end <= start))
1415 		return;
1416 
1417 	if ((end - start) >= l2x0_size) {
1418 		l2x0_flush_all();
1419 		return;
1420 	}
1421 
1422 	new_start = bcm_l2_phys_addr(start);
1423 	new_end = bcm_l2_phys_addr(end);
1424 
1425 	/* normal case, no cross section between start and end */
1426 	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1427 		l2x0_flush_range(new_start, new_end);
1428 		return;
1429 	}
1430 
1431 	/* They cross sections, so it can only be a crossing from section
1432 	 * 2 to section 3
1433 	 */
1434 	l2x0_flush_range(new_start,
1435 		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1436 	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1437 		new_end);
1438 }
1439 
1440 static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
1441 	.num_lock = 8,
1442 	.of_parse = l2c310_of_parse,
1443 	.enable = l2c_enable,
1444 	.fixup = l2c310_fixup,
1445 	.save  = l2c310_save,
1446 	.outer_cache = {
1447 		.inv_range   = bcm_inv_range,
1448 		.clean_range = bcm_clean_range,
1449 		.flush_range = bcm_flush_range,
1450 		.flush_all   = l2c210_flush_all,
1451 		.disable     = l2c_disable,
1452 		.sync        = l2c210_sync,
1453 		.resume      = l2c310_resume,
1454 	},
1455 };
1456 
1457 static void __init tauros3_save(void __iomem *base)
1458 {
1459 	l2x0_saved_regs.aux2_ctrl =
1460 		readl_relaxed(base + TAUROS3_AUX2_CTRL);
1461 	l2x0_saved_regs.prefetch_ctrl =
1462 		readl_relaxed(base + L2X0_PREFETCH_CTRL);
1463 }
1464 
1465 static void tauros3_resume(void)
1466 {
1467 	void __iomem *base = l2x0_base;
1468 
1469 	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
1470 		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
1471 			       base + TAUROS3_AUX2_CTRL);
1472 		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
1473 			       base + L2X0_PREFETCH_CTRL);
1474 
1475 		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
1476 	}
1477 }
1478 
1479 static const struct l2c_init_data of_tauros3_data __initconst = {
1480 	.num_lock = 8,
1481 	.enable = l2c_enable,
1482 	.save  = tauros3_save,
1483 	/* Tauros3 broadcasts L1 cache operations to L2 */
1484 	.outer_cache = {
1485 		.resume      = tauros3_resume,
1486 	},
1487 };
1488 
1489 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
1490 static const struct of_device_id l2x0_ids[] __initconst = {
1491 	L2C_ID("arm,l210-cache", of_l2c210_data),
1492 	L2C_ID("arm,l220-cache", of_l2c220_data),
1493 	L2C_ID("arm,pl310-cache", of_l2c310_data),
1494 	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1495 	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
1496 	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
1497 	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
1498 	/* Deprecated IDs */
1499 	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1500 	{}
1501 };
1502 
1503 int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
1504 {
1505 	const struct l2c_init_data *data;
1506 	struct device_node *np;
1507 	struct resource res;
1508 	u32 cache_id;
1509 
1510 	np = of_find_matching_node(NULL, l2x0_ids);
1511 	if (!np)
1512 		return -ENODEV;
1513 
1514 	if (of_address_to_resource(np, 0, &res))
1515 		return -ENODEV;
1516 
1517 	l2x0_base = ioremap(res.start, resource_size(&res));
1518 	if (!l2x0_base)
1519 		return -ENOMEM;
1520 
1521 	l2x0_saved_regs.phy_base = res.start;
1522 
1523 	data = of_match_node(l2x0_ids, np)->data;
1524 
1525 	/* L2 configuration can only be changed if the cache is disabled */
1526 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
1527 		if (data->of_parse)
1528 			data->of_parse(np, &aux_val, &aux_mask);
1529 
1530 	if (cache_id_part_number_from_dt)
1531 		cache_id = cache_id_part_number_from_dt;
1532 	else
1533 		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
1534 
1535 	__l2c_init(data, aux_val, aux_mask, cache_id);
1536 
1537 	return 0;
1538 }
1539 #endif
1540