/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
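/*
 * Illustrative sketch (not from the original sources): how the loop
 * below composes the DC CISW/ISW operand for a hypothetical level-0
 * cache that is 4-way, 256-set, with 64-byte lines (CCSIDR_EL1 would
 * report LineSize = 2, i.e. log2(64) - 4):
 *
 *	x2  = log2(line size)	= 6	-> set index lives at bits [13:6]
 *	x3  = ways - 1		= 3
 *	x5  = clz(w3)		= 30	-> way index lives at bits [31:30]
 *	x12 = level << 1	= 0	-> level lives at bits [3:1]
 *
 *	operand = (way << 30) | (set << 6) | (level << 1)
 */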
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
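/*
 * For reference (Arm ARM, CLIDR_EL1): each 3-bit Ctype field decodes as
 * 0 = no cache, 1 = icache only, 2 = dcache only, 3 = separate i/d,
 * 4 = unified. The loop below therefore only calls __asm_dcache_level
 * for levels whose Ctype is >= 2, i.e. levels that hold data. For
 * example, Ctype1 = 0b011 (separate i/d) is processed, while
 * Ctype1 = 0b001 (icache only) is skipped.
 */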
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection
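
/*
 * A minimal sketch of how C code typically reaches the two entry points
 * above (u-boot's arch/arm/cpu/armv8/cache_v8.c provides wrappers along
 * these lines; the exact placement here is illustrative):
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 *
 *	void flush_dcache_all(void)
 *	{
 *		__asm_flush_dcache_all();
 *	}
 */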

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
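/*
 * Worked example for the CTR_EL0 read below: DminLine (bits [19:16])
 * holds log2 of the smallest data cache line size in words. For a
 * common 64-byte line, DminLine = 4 (64 bytes = 16 words), and the
 * code computes 4 << 4 = 64 bytes.
 */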
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- CTR_EL0.DminLine */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
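/*
 * Note: 'dc ivac' discards the whole line containing the address, so
 * callers must line-align buffers they invalidate or risk losing dirty
 * neighbouring data. A caller-side sketch (assuming a 64-byte
 * worst-case line, as u-boot's ARCH_DMA_MINALIGN typically provides):
 *
 *	char rx_buf[512] __attribute__((aligned(ARCH_DMA_MINALIGN)));
 *
 *	__asm_invalidate_dcache_range((ulong)rx_buf,
 *				      (ulong)rx_buf + sizeof(rx_buf));
 */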
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19	/* x3 <- CTR_EL0.DminLine */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

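/*
 * The next three symbols are weak no-op stubs. SoCs with an external
 * (L3/system-level) cache are expected to override them; generic code
 * only relies on the return value (0 = success).
 */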
.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
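/*
 * A minimal usage sketch, assuming a 'new_tables' pointer prepared by
 * the caller (illustrative only; in u-boot the MMU setup code in
 * cache_v8.c is the real caller):
 *
 *	extern void __asm_switch_ttbr(u64 new_ttbr);
 *
 *	// flush the new tables to memory first, since the switch
 *	// sequence below runs with the data cache disabled
 *	__asm_flush_dcache_range((ulong)new_tables,
 *				 (ulong)new_tables + tables_size);
 *	__asm_switch_ttbr((u64)new_tables);
 */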
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection