/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size	Clean (ticks)	Dirty (ticks)
 *   4096	 21  20  21	 53  55  54
 *   8192	 40  41  40	106 100 102
 *  16384	 77  77  76	140 140 138
 *  32768	150 149 150	214 216 212 <---
 *  65536	296 297 296	351 358 361
 * 131072	591 591 591	656 657 651
 *  Whole	132 136 132	221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)

	.data
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
/*
 * There is no "clean entire D cache" operation, so the whole cache is
 * cleaned by reading CACHE_DSIZE bytes of reserved memory at flush_base,
 * displacing every (possibly dirty) line.  flush_base is toggled by
 * CACHE_DSIZE on each call so that consecutive flushes read addresses
 * which are not already resident in the cache.
 */
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr


/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_dma_inv_range)
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_dma_clean_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

	__INITDATA

	.type	v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
	.long	v4wb_flush_kern_cache_all
	.long	v4wb_flush_user_cache_all
	.long	v4wb_flush_user_cache_range
	.long	v4wb_coherent_kern_range
	.long	v4wb_coherent_user_range
	.long	v4wb_flush_kern_dcache_area
	.long	v4wb_dma_inv_range
	.long	v4wb_dma_clean_range
	.long	v4wb_dma_flush_range
	.size	v4wb_cache_fns, . - v4wb_cache_fns
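
/*
 * For reference, v4wb_cache_fns is consumed from C as a struct cpu_cache_fns
 * (asm/cacheflush.h) when MULTI_CACHE is in effect, so the .long entries
 * above must stay in the same order as the structure members.  A minimal
 * sketch of the expected layout follows; it is an assumption based on the
 * cacheflush.h of this kernel generation, and the header, not this file,
 * is authoritative:
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long start,
 *					 unsigned long end, unsigned int flags);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_area)(void *, size_t);
 *		void (*dma_inv_range)(const void *, const void *);
 *		void (*dma_clean_range)(const void *, const void *);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */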