/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs.
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

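	/* One way covers sets << entry_shift bytes of the address array. */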
	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

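	/*
	 * Walk every line of each way through the operand cache's
	 * memory-mapped address array.
	 */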
	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = ctrl_inl(addr);

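			/*
			 * Write back and invalidate lines that are both
			 * valid and dirty by clearing their V and U bits.
			 */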
			if ((data & v) == v)
				ctrl_outl(data & ~v, addr);
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routines for the a.out format.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_wback_region((void *)start, end - start);
}

/*
 * Write back & invalidate the D-cache of the page.
 */
static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

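	/*
	 * Keep the valid bit in the comparison value so that only valid
	 * entries with a matching tag are touched below.
	 */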
	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
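	/*
	 * The walk over the address array is done with interrupts disabled
	 * and while running from the uncached P2 area.
	 */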
	local_irq_save(flags);
	jump_to_uncached();

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

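			/* Keep only the address tag and the valid bit. */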
			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				ctrl_outl(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
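	/* Pages never mapped to user space (no PG_mapped) are skipped. */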
	if (test_bit(PG_mapped, &page->flags))
		__flush_dcache_page(PHYSADDR(page_address(page)));
}

void __uses_jump_to_uncached flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}

void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	flush_cache_all();
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * We could call flush_cache_page() for each page in this range,
	 * but that is not efficient (it scans the whole cache each time).
	 *
	 * We can't use the A-bit trick either, since the range may have
	 * no valid TLB entries.
	 */
	flush_cache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
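	/* Convert the pfn to a physical address and flush any matching lines. */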
	__flush_dcache_page(pfn << PAGE_SHIFT);
}

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with 32K cache but
 * without it we get occasional "Memory fault" when loading a program.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}