/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
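
/*
 * Note: the geometry above gives a data cache size of
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 *	= 8 * 64 * 32 = 16384 bytes,
 * so CACHE_DLIMIT is exactly the cache size.  Invalidating a range
 * that large line by line would take 16384 / 32 = 512 D-cache line
 * operations, whereas the whole-cache path in __flush_whole_cache
 * below is a single pair of mcr instructions; ranges of
 * CACHE_DLIMIT or more therefore take the whole-cache path.
 */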

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(v4wt_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* FALLTHROUGH */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_map_area)
	mov	pc, lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
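
/*
 * For reference: define_cache_functions (proc-macros.S) emits the
 * cpu_cache_fns table for this cache type.  The invocation above
 * roughly expands to something like the sketch below, one .long per
 * callback in the field order of struct cpu_cache_fns; see
 * proc-macros.S and <asm/cacheflush.h> for the authoritative layout:
 *
 *	.align	2
 *	ENTRY(v4wt_cache_fns)
 *		.long	v4wt_flush_icache_all
 *		.long	v4wt_flush_kern_cache_all
 *		.long	v4wt_flush_user_cache_all
 *		.long	v4wt_flush_user_cache_range
 *		.long	v4wt_coherent_kern_range
 *		.long	v4wt_coherent_user_range
 *		.long	v4wt_flush_kern_dcache_area
 *		.long	v4wt_dma_map_area
 *		.long	v4wt_dma_unmap_area
 *		.long	v4wt_dma_flush_range
 *	.size	v4wt_cache_fns, . - v4wt_cache_fns
 */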