/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 *
 *	No per-address-space state is needed here, so this simply
 *	falls through to the whole-cache flush below.
 */
ENTRY(v4_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	mov	pc, lr				@ return
#else
	/* FALLTHROUGH */
#endif

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 *
 *	No range operation is available on this cache, so flush
 *	the whole ID cache unconditionally.
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	/*
	 * This must be an unconditional MCR: the previous "mcreq"
	 * executed based on whatever stale condition flags the
	 * caller happened to leave set (nothing here sets them),
	 * so the flush was randomly skipped.
	 */
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mov	pc, lr				@ return
#else
	/* FALLTHROUGH */
#endif

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_user_range)
	mov	pc, lr				@ nothing to do - return

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 *
 *	Falls through (via dma_inv_range) to the whole-cache flush
 *	in v4_dma_flush_range below.
 */
ENTRY(v4_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	No range invalidate is available on this cache, so fall
 *	through and flush the whole ID cache instead.
 */
ENTRY(v4_dma_inv_range)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	/* FALLTHROUGH to the return in dma_clean_range */

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	A no-op here; presumably this cache never holds dirty
 *	lines that need writing back (write-through) - confirm
 *	against the CPU documentation.
 */
ENTRY(v4_dma_clean_range)
	mov	pc, lr				@ nothing to do - return

	__INITDATA

	/*
	 * Table of the cache operations above.  The entry order
	 * must match the cpu_cache_fns structure this is consumed
	 * as (defined elsewhere - do not reorder).
	 */
	.type	v4_cache_fns, #object
ENTRY(v4_cache_fns)
	.long	v4_flush_kern_cache_all
	.long	v4_flush_user_cache_all
	.long	v4_flush_user_cache_range
	.long	v4_coherent_kern_range
	.long	v4_coherent_user_range
	.long	v4_flush_kern_dcache_area
	.long	v4_dma_inv_range
	.long	v4_dma_clean_range
	.long	v4_dma_flush_range
	.size	v4_cache_fns, . - v4_cache_fns