/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level)
 *
 * flush or invalidate one level of data cache.
 *
 * x0: cache level
 * x1: 0 flush & invalidate, 1 invalidate only
 * x2~x9, x12: clobbered
 */
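/*
 * Illustrative call sequence (a sketch only, not a code path in this
 * file): clean and invalidate level 0, the L1 data cache.
 *
 *	mov	x0, #0			// cache level 0
 *	mov	x1, #0			// 0 = flush (clean) & invalidate
 *	bl	__asm_flush_dcache_level
 */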
ENTRY(__asm_flush_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- number of cache ways - 1 */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- number of cache sets - 1 */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */
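	/*
	 * The loops below assemble the DC ISW/CISW operand in the set/way
	 * format given in the ARMv8 ARM:
	 *
	 *	way:   bits [31 : 32-A], A = ceil(log2(#ways))  -> shift x5
	 *	set:   bits [L+S-1 : L], L = log2(line bytes)   -> shift x2
	 *	level: bits [3:1], already held in x12 as level << 1
	 *
	 * e.g. for a 4-way cache with 64-byte lines, the way index is
	 * shifted left by 30 and the set index left by 6.
	 */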

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f		/* x1 bit 0 clear: clean & invalidate */
	dc	isw, x9			/* invalidate by set/way */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 flush & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */
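	/*
	 * CLIDR_EL1 encodes a 3-bit Ctype field per cache level at bits
	 * [3n+2:3n], hence the tripled level shift below: 0 = no cache,
	 * 1 = icache only, 2 = dcache only, 3 = separate icache/dcache,
	 * 4 = unified. Only types >= 2 hold data that needs flushing.
	 */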

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_flush_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)

ENTRY(__asm_flush_dcache_all)
	mov	x16, lr
	mov	x0, #0			/* 0: flush & invalidate */
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_flush_dcache_all)

ENTRY(__asm_invalidate_dcache_all)
	mov	x16, lr
	mov	x0, #0xffff		/* bit 0 set: invalidate only */
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_invalidate_dcache_all)

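/*
 * Presumed C-side view of the two entry points above (a sketch; the
 * authoritative declarations live in U-Boot's arch headers):
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 */
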
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
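/*
 * Illustrative call (a sketch; buf_start/buf_end are hypothetical
 * symbols, and C code normally reaches this via U-Boot's
 * flush_dcache_range() wrapper):
 *
 *	ldr	x0, =buf_start		// start address
 *	ldr	x1, =buf_end		// end address (exclusive)
 *	bl	__asm_flush_dcache_range
 */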
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- CTR_EL0.DminLine */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size = 4 << DminLine bytes */
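	/*
	 * Worked example: for 64-byte lines, CTR_EL0.DminLine holds
	 * log2(16 words) = 4, so x2 = 4 << 4 = 64 bytes.
	 */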

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start address down to line size */
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
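/*
 * IC IALLUIS invalidates all instruction caches to the Point of
 * Unification, Inner Shareable: other cores in the same Inner
 * Shareable domain observe the invalidation too.
 */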
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)