/*
 *  linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

/*
 *	v7_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: r0-r5, r7, r9-r11
 */
ENTRY(v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	ands	r3, r0, #0x7000000		@ extract loc from clidr
	mov	r3, r3, lsr #23			@ left align loc bit field
	beq	finished			@ if loc is 0, then no need to clean
	mov	r10, #0				@ start clean at cache level 0
loop1:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask off the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr & csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number of the way size
	clz	r5, r4				@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop2:
	mov	r9, r4				@ create working copy of max way size
loop3:
	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
	orr	r11, r11, r7, lsl r2		@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the way
	bge	loop3
	subs	r7, r7, #1			@ decrement the index
	bge	loop2
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	loop1
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb
	isb
	mov	pc, lr
ENDPROC(v7_flush_dcache_all)
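/*
 * For reference, the set/way walk above corresponds to the C sketch
 * below.  read_clidr(), write_csselr(), read_ccsidr() and dccisw() are
 * illustrative stand-ins for the CP15 accesses in the assembly; they
 * are not kernel APIs.
 *
 *	void flush_dcache_all(void)
 *	{
 *		u32 clidr = read_clidr();
 *		u32 loc = (clidr >> 24) & 0x7;		// level of coherency
 *		for (u32 level = 0; level < loc; level++) {
 *			u32 type = (clidr >> (level * 3)) & 0x7;
 *			if (type < 2)			// no cache, or i-cache only
 *				continue;
 *			write_csselr(level << 1);	// select data/unified cache
 *			u32 ccsidr = read_ccsidr();
 *			u32 line_shift = (ccsidr & 0x7) + 4;
 *			int max_way = (ccsidr >> 3) & 0x3ff;
 *			int max_set = (ccsidr >> 13) & 0x7fff;
 *			u32 way_shift = max_way ? 32 - fls(max_way) : 0;
 *			for (int set = max_set; set >= 0; set--)
 *				for (int way = max_way; way >= 0; way--)
 *					dccisw((level << 1) |
 *					       (way << way_shift) |
 *					       (set << line_shift));
 *		}
 *	}
 */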
/*
 *	v7_flush_kern_cache_all()
 *
 *	Flush the entire cache system.
 *	The data cache flush is now achieved using atomic clean / invalidates
 *	working outwards from L1 cache. This is done using Set/Way based cache
 *	maintenance instructions.
 *	The instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 */
ENTRY(v7_flush_kern_cache_all)
	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
	bl	v7_flush_dcache_all
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
	mov	pc, lr
ENDPROC(v7_flush_kern_cache_all)

/*
 *	v7_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v7_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v7_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v7_flush_user_cache_range)
	mov	pc, lr
ENDPROC(v7_flush_user_cache_all)
ENDPROC(v7_flush_user_cache_range)

/*
 *	v7_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start - virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v7_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start - virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
	dsb
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
	dsb
	isb
	mov	pc, lr
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)

/*
 *	v7_flush_kern_dcache_page(kaddr)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- kaddr - kernel address (guaranteed to be page aligned)
 */
ENTRY(v7_flush_kern_dcache_page)
	dcache_line_size r2, r3
	add	r1, r0, #PAGE_SZ
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_flush_kern_dcache_page)
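/*
 * dcache_line_size (from proc-macros.S) computes the smallest D-cache
 * line size in bytes from the Cache Type Register.  A C sketch of the
 * same computation is shown below; read_ctr() is an illustrative
 * stand-in for the CP15 read (mrc p15, 0, rX, c0, c0, 1), not a kernel
 * helper.
 *
 *	static inline u32 dcache_line_size(void)
 *	{
 *		u32 ctr = read_ctr();			// cache type register
 *		u32 dminline = (ctr >> 16) & 0xf;	// log2(words per line)
 *		return 4 << dminline;			// words are 4 bytes
 *	}
 */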
/*
 *	v7_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start - virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v7_dma_inv_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line

	tst	r1, r3
	bic	r1, r1, r3
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
1:
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_inv_range)

/*
 *	v7_dma_clean_range(start,end)
 *
 *	- start - virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v7_dma_clean_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_clean_range)

/*
 *	v7_dma_flush_range(start,end)
 *
 *	- start - virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v7_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_flush_range)

	__INITDATA

	.type	v7_cache_fns, #object
ENTRY(v7_cache_fns)
	.long	v7_flush_kern_cache_all
	.long	v7_flush_user_cache_all
	.long	v7_flush_user_cache_range
	.long	v7_coherent_kern_range
	.long	v7_coherent_user_range
	.long	v7_flush_kern_dcache_page
	.long	v7_dma_inv_range
	.long	v7_dma_clean_range
	.long	v7_dma_flush_range
	.size	v7_cache_fns, . - v7_cache_fns
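/*
 * The table above is consumed through struct cpu_cache_fns (see
 * asm/cacheflush.h), so the entries must stay in the order of its
 * function pointers.  For this kernel the layout is roughly the sketch
 * below; the header remains the authoritative definition.
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long,
 *					 unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_page)(void *);
 *		void (*dma_inv_range)(const void *, const void *);
 *		void (*dma_clean_range)(const void *, const void *);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */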