/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level)
 *
 * Clean and invalidate (or invalidate only) one data-cache level by
 * set/way, walking every set and every way of the selected level.
 *
 * x0: cache level
 * x1: 0 flush & invalidate, 1 invalidate only
 * x2~x9: clobbered
 * x12: clobbered (holds cache level << 1 for CSSELR_EL1 / DC .SW ops)
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f		/* bit 0 of x1 selects the operation */
	dc	isw, x9			/* invalidate by set/way */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 flush & invalidate, 1 invalidate only
 *
 * Clean and invalidate (or invalidate only) all data cache levels up to
 * the Level of Coherency, by SET/WAY.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0			/* x1 <- operation flag for each level */
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (Level of Coherency) */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr			/* save lr: we bl to a helper below */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level (3 bits per level in CLIDR) */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache-only */
	bl	__asm_flush_dcache_level /* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)

/*
 * void __asm_flush_dcache_all(void)
 *
 * Clean & invalidate all data cache levels (wrapper passing x0 = 0).
 */
ENTRY(__asm_flush_dcache_all)
	mov	x16, lr			/* save lr around the bl */
	mov	x0, #0			/* 0 = flush & invalidate */
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_flush_dcache_all)

/*
 * void __asm_invalidate_dcache_all(void)
 *
 * Invalidate (without cleaning) all data cache levels.
 */
ENTRY(__asm_invalidate_dcache_all)
	mov	x16, lr			/* save lr around the bl */
	mov	x0, #0xffff		/* only bit 0 is tested (tbz w1, #0): odd = invalidate only */
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_invalidate_dcache_all)

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0		/* CTR_EL0.DminLine = log2(words) of smallest d-line */
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size in bytes */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start address down to a line boundary */
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * Invalidate all instruction caches to PoU, Inner Shareable.
 * (Note: this is an icache operation, not a TLB operation.)
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)