/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* fallthrough */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_dma_inv_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	/* FALLTHROUGH */

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_dma_clean_range)
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

	__INITDATA

	.type	v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
	.long	v4wt_flush_kern_cache_all
	.long	v4wt_flush_user_cache_all
	.long	v4wt_flush_user_cache_range
	.long	v4wt_coherent_kern_range
	.long	v4wt_coherent_user_range
	.long	v4wt_flush_kern_dcache_area
	.long	v4wt_dma_inv_range
	.long	v4wt_dma_clean_range
	.long	v4wt_dma_flush_range
	.size	v4wt_cache_fns, . - v4wt_cache_fns
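
/*
 * Note on the table above: these entries are consumed through the kernel's
 * generic cache function-pointer table, so their order must match the
 * structure they are loaded into.  As a rough sketch only (field types
 * paraphrased from struct cpu_cache_fns in <asm/cacheflush.h> of this
 * kernel generation, not quoted verbatim), the layout being matched looks
 * approximately like:
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_area)(void *, size_t);
 *		void (*dma_inv_range)(const void *, const void *);
 *		void (*dma_clean_range)(const void *, const void *);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 *
 * This is why v4wt_flush_kern_cache_all is listed first even though the
 * user-cache entry point appears first in this file.
 */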