/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8

#ifdef CONFIG_ARM_ERRATA_411920
/*
 * Invalidate the entire I cache (this code is a workaround for the ARM1136
 * erratum 411920 - Invalidate Instruction Cache operation can fail.  This
 * erratum is present in 1136, 1156 and 1176.  It does not affect the MPCore.)
 *
 * Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_icache_inval_all)
	mov	r0, #0
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
	mov	pc, lr
#endif

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	mov	pc, lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	mov	pc, lr
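/*
 * A minimal sketch (assumed call sites, not taken from this file) of how the
 * entry points above are reached: with CONFIG_CPU_CACHE_V6 selected, the
 * generic cache-maintenance hooks in <asm/cacheflush.h> dispatch through the
 * __cpuc_* glue, for example:
 *
 *	flush_cache_all();		-> __cpuc_flush_kern_all()
 *					-> v6_flush_kern_cache_all
 *	flush_icache_range(start, end);	-> __cpuc_coherent_kern_range()
 *					-> v6_coherent_kern_range (below)
 */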
/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
2:
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	mov	pc, lr

/*
 * Fault handling for the cache operation above.  If the virtual address in r0
 * isn't mapped, just try the next page.
 */
9001:
	mov	r0, r0, lsr #12
	mov	r0, r0, lsl #12
	add	r0, r0, #4096
	b	2b
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page at addr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr


/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_inv_range)
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
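/*
 * Worked example for v6_dma_inv_range (assumed addresses, for illustration
 * only): with 32-byte D-cache lines, a call with start = 0x1008 and
 * end = 0x2010 first cleans the partially covered line at 0x1000 (so dirty
 * data outside the buffer is not lost), then cleans and invalidates the
 * partially covered line at 0x2000, and finally the loop invalidates the
 * lines at 0x1000, 0x1020, ... up to but not including 0x2000 before
 * draining the write buffer.
 */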
/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_clean_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

	__INITDATA

	.type	v6_cache_fns, #object
ENTRY(v6_cache_fns)
	.long	v6_flush_kern_cache_all
	.long	v6_flush_user_cache_all
	.long	v6_flush_user_cache_range
	.long	v6_coherent_kern_range
	.long	v6_coherent_user_range
	.long	v6_flush_kern_dcache_area
	.long	v6_dma_inv_range
	.long	v6_dma_clean_range
	.long	v6_dma_flush_range
	.size	v6_cache_fns, . - v6_cache_fns
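/*
 * A minimal sketch (paraphrased from memory of the <asm/cacheflush.h> of this
 * kernel era; treat the exact signatures as an assumption) of the C-side view
 * of the v6_cache_fns table above.  The .long entries must stay in the same
 * order as the members of struct cpu_cache_fns:
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_area)(void *, size_t);
 *		void (*dma_inv_range)(const void *, const void *);
 *		void (*dma_clean_range)(const void *, const void *);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */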