/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

/*
 *	__flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: x0-x7, x9-x11
 */
__flush_dcache_all:
	dmb	sy				// ensure ordering with previous memory accesses
	mrs	x0, clidr_el1			// read clidr
	and	x3, x0, #0x7000000		// extract loc from clidr
	lsr	x3, x3, #23			// left align loc bit field
	cbz	x3, finished			// if loc is 0, then no need to clean
	mov	x10, #0				// start clean at cache level 0
loop1:
	add	x2, x10, x10, lsr #1		// work out 3x current cache level
	lsr	x1, x0, x2			// extract cache type bits from clidr
	and	x1, x1, #7			// mask off the bits for current cache only
	cmp	x1, #2				// see what cache we have at this level
	b.lt	skip				// skip if no cache, or just i-cache
	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
	msr	csselr_el1, x10			// select current cache level in csselr
	isb					// isb to sync the new csselr & ccsidr
	mrs	x1, ccsidr_el1			// read the new ccsidr
	restore_irqs x9
	and	x2, x1, #7			// extract the length of the cache lines
	add	x2, x2, #4			// add 4 (line length offset)
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3		// find maximum number of the way size
	clz	w5, w4				// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13		// extract max number of the index size
loop2:
	mov	x9, x4				// create working copy of max way size
loop3:
	lsl	x6, x9, x5
	orr	x11, x10, x6			// factor way and cache number into x11
	lsl	x6, x7, x2
	orr	x11, x11, x6			// factor index number into x11
	dc	cisw, x11			// clean & invalidate by set/way
	subs	x9, x9, #1			// decrement the way
	b.ge	loop3
	subs	x7, x7, #1			// decrement the index
	b.ge	loop2
skip:
	add	x10, x10, #2			// increment cache number
	cmp	x3, x10
	b.gt	loop1
finished:
	mov	x10, #0				// switch back to cache level 0
	msr	csselr_el1, x10			// select current cache level in csselr
	dsb	sy
	isb
	ret
ENDPROC(__flush_dcache_all)
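/*
 * Worked example of the set/way operand built in loop3 above (the
 * numbers are illustrative assumptions, not read from any particular
 * CPU): for a 4-way cache with 64-byte lines at level 0, CCSIDR gives
 * a maximum way number of x4 = 3, so "clz w5, w4" yields x5 = 30, and
 * the line length offset is x2 = 2 + 4 = 6.  Cleaning way 2 of set 5
 * then issues "dc cisw" with the operand
 *
 *	(2 << 30) | (5 << 6) | (0 << 1) == 0x80000140
 *
 * i.e. way in the top bits, set above the line length offset, and
 * cache level in bits [3:1], matching the ARMv8 DC CISW encoding.
 */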

/*
 *	flush_cache_all()
 *
 *	Flush the entire cache system.  The data cache flush is now achieved
 *	using atomic clean / invalidates working outwards from L1 cache. This
 *	is done using Set/Way based cache maintenance instructions.  The
 *	instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 */
ENTRY(flush_cache_all)
	mov	x12, lr
	bl	__flush_dcache_all
	mov	x0, #0
	ic	ialluis				// I+BTB cache invalidate
	ret	x12
ENDPROC(flush_cache_all)

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, dc	cvau, x4	)		// clean D line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

	icache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, ic	ivau, x4	)		// invalidate I line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
9:						// ignore any faulting cache operation
	dsb	ish
	isb
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that the data held in the page kaddr is written back to the
 *	page in question.
 *
 *	- kaddr	- kernel address
 *	- size	- size of region in bytes
 */
ENTRY(__flush_dcache_area)
	dcache_line_size x2, x3
	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0			// clean & invalidate D line / unified line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__flush_dcache_area)
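/*
 * Usage sketch for __flush_dcache_area (an illustrative assumption,
 * not part of this file): a C caller passes a kernel virtual address
 * and a length in bytes, e.g.
 *
 *	__flush_dcache_area(page_address(page), PAGE_SIZE);
 *
 * Unaligned arguments are safe: the start address is rounded down to
 * a cache line boundary and the loop runs until it passes kaddr + size.
 */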

/*
 *	__inval_cache_range(start, end)
 *	- start	- start address of region
 *	- end	- end address of region
 */
ENTRY(__inval_cache_range)
	/* FALLTHROUGH */

/*
 *	__dma_inv_range(start, end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
__dma_inv_range:
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPROC(__inval_cache_range)
ENDPROC(__dma_inv_range)

/*
 *	__dma_clean_range(start, end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
__dma_clean_range:
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	cvac, x0			// clean D / U line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__dma_clean_range)

/*
 *	__dma_flush_range(start, end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(__dma_flush_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0			// clean & invalidate D / U line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__dma_flush_range)

/*
 *	__dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_map_area)
	add	x1, x1, x0
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_range
	b	__dma_clean_range
ENDPROC(__dma_map_area)

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_unmap_area)
	add	x1, x1, x0
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_range
	ret
ENDPROC(__dma_unmap_area)
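
/*
 * Summary of the maintenance performed by __dma_map_area and
 * __dma_unmap_area above (derived from the branches, for reference):
 *
 *	direction		map		unmap
 *	DMA_TO_DEVICE		clean		none
 *	DMA_FROM_DEVICE		invalidate	invalidate
 *	DMA_BIDIRECTIONAL	clean		invalidate
 *
 * The map step makes CPU writes visible to the device (or discards
 * stale lines before the device writes); the unmap step discards any
 * lines the CPU may have fetched while the device owned the buffer.
 */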