/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level)
 *
 * clean and invalidate one level of data cache.
 *
 * x0: cache level
 * x1~x9: clobbered
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x1, x0, #1
	msr	csselr_el1, x1		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x1 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x1, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	dc	cisw, x9		/* clean & invalidate by set/way */
	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)
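/*
 * For reference, the DC CISW operand assembled in loop_set/loop_way above
 * follows the set/way/level encoding used by the ARMv8 ARM sample code
 * this file is based on:
 *   bits [3:1]   cache level << 1                          (x1)
 *   way number   shifted left by 32 - ceil(log2(#ways))    (x6 << x5)
 *   set number   shifted left by log2(line length)         (x4 << x2)
 * All other bits are left zero.
 */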
/*
 * void __asm_flush_dcache_all(void)
 *
 * clean and invalidate all data cache by SET/WAY.
 */
ENTRY(__asm_flush_dcache_all)
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x1, x0, #1
	add	x1, x1, x0		/* x1 <- tripled cache level */
	lsr	x1, x10, x1
	and	x1, x1, #7		/* x1 <- cache type */
	cmp	x1, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_flush_dcache_level
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_flush_dcache_all)

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
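/*
 * Usage note: these helpers are not normally called directly. The C cache
 * maintenance code (e.g. flush_dcache_all(), flush_dcache_range() and
 * invalidate_icache_all() in cache_v8.c) wraps them; a minimal sketch,
 * assuming the usual U-Boot declarations, would be:
 *
 *   void __asm_flush_dcache_all(void);
 *   void __asm_flush_dcache_range(u64 start, u64 end);
 *   void __asm_invalidate_icache_all(void);
 */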