/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs.
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = __raw_readl(addr);

			if ((data & v) == v)
				__raw_writel(data & ~v, addr);
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and the routine for the
 * a.out format.
 */
static void sh7705_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

	start = data->addr1;
	end = data->addr2;

	__flush_wback_region((void *)start, end - start);
}

/*
 * Write back & invalidate the D-cache of the page.
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However, this function exists to deal
	 * with potential cache aliasing, therefore the optimisation is
	 * probably not possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

			data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				__raw_writel(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}
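
/*
 * Both cache_wback_all() and __flush_dcache_page() walk the operand
 * cache through its memory-mapped address array at
 * CACHE_OC_ADDRESS_ARRAY.  Roughly (a sketch only; the geometry comes
 * from current_cpu_data.dcache):
 *
 *	for (way = 0; way < dcache.ways; way++)
 *		for (off = 0; off < (dcache.sets << dcache.entry_shift);
 *		     off += dcache.linesz)
 *			entry = CACHE_OC_ADDRESS_ARRAY
 *				+ way * dcache.way_incr + off;
 *
 * Reading an entry returns the line's tag plus its U (updated/dirty)
 * and V (valid) bits; writing the entry back with U and V cleared is
 * what the routines above rely on to write back and invalidate a line.
 */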

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_folio(void *arg)
{
	struct folio *folio = arg;
	struct address_space *mapping = folio_flush_mapping(folio);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &folio->flags);
	else {
		unsigned long pfn = folio_pfn(folio);
		unsigned int i, nr = folio_nr_pages(folio);

		for (i = 0; i < nr; i++)
			__flush_dcache_page((pfn + i) * PAGE_SIZE);
	}
}

static void sh7705_flush_cache_all(void *args)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
static void sh7705_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	unsigned long pfn = data->addr2;

	__flush_dcache_page(pfn << PAGE_SHIFT);
}

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with a 32K cache, but
 * without it we get occasional "Memory fault" errors when loading a
 * program.
 */
static void sh7705_flush_icache_folio(void *arg)
{
	struct folio *folio = arg;

	__flush_purge_region(folio_address(folio), folio_size(folio));
}

void __init sh7705_cache_init(void)
{
	local_flush_icache_range = sh7705_flush_icache_range;
	local_flush_dcache_folio = sh7705_flush_dcache_folio;
	local_flush_cache_all = sh7705_flush_cache_all;
	local_flush_cache_mm = sh7705_flush_cache_all;
	local_flush_cache_dup_mm = sh7705_flush_cache_all;
	local_flush_cache_range = sh7705_flush_cache_all;
	local_flush_cache_page = sh7705_flush_cache_page;
	local_flush_icache_folio = sh7705_flush_icache_folio;
}
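
/*
 * Usage sketch (assuming the generic arch/sh/mm/cache.c dispatch and
 * its cacheop_on_each_cpu() helper, which are not part of this file):
 * the common entry points package their arguments in a struct
 * flusher_data and call the local_flush_* hooks installed above; e.g.
 * flush_icache_range() ends up doing roughly
 *
 *	struct flusher_data data = { .addr1 = start, .addr2 = end };
 *	cacheop_on_each_cpu(local_flush_icache_range, &data, 1);
 *
 * which lands in sh7705_flush_icache_range() here.
 */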