/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ARMv4 cache handling: these cores have a combined (unified) ID cache
 * with no clean/writeback operation, so every function below resolves to
 * either "invalidate the whole cache" (CP15 c7, c7, 0) or a no-op.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
	mov	ip, #0
	@ Unconditional MCR: the previous "mcreq" depended on condition
	@ flags that are undefined at function entry, so the flush only
	@ happened by accident.  There is no range operation on ARMv4,
	@ so the whole ID cache is invalidated instead.
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	No-op: the ARMv4 unified cache needs no I/D synchronisation.
 */
ENTRY(v4_coherent_user_range)
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(v4_flush_kern_dcache_page)
	/* FALLTHROUGH */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_inv_range)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	/* FALLTHROUGH */

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	No-op: a write-through ARMv4 cache has nothing to clean.
 */
ENTRY(v4_dma_clean_range)
	mov	pc, lr

	__INITDATA

	/*
	 * Function table consumed by the generic cache glue; entry
	 * order must match struct cpu_cache_fns.
	 */
	.type	v4_cache_fns, #object
ENTRY(v4_cache_fns)
	.long	v4_flush_kern_cache_all
	.long	v4_flush_user_cache_all
	.long	v4_flush_user_cache_range
	.long	v4_coherent_kern_range
	.long	v4_coherent_user_range
	.long	v4_flush_kern_dcache_page
	.long	v4_dma_inv_range
	.long	v4_dma_clean_range
	.long	v4_dma_flush_range
	.size	v4_cache_fns, . - v4_cache_fns