xref: /openbmc/linux/arch/nios2/mm/cacheflush.c (revision 867a0e05)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

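/*
 * Write back and invalidate the data cache lines covering [start, end).
 * The "flushda" instruction looks a line up by address and only acts on
 * a tag match, so lines holding unrelated data are left alone.
 */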
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

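/*
 * Write back and invalidate data cache lines for [start, end) using
 * "flushd", which flushes the line indexed by the address regardless of
 * its tag.  One cache size's worth of addresses therefore covers every
 * line, which is what the clamp to cpuinfo.dcache_size relies on.
 */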
static void __flush_dcache_all(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

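/*
 * Invalidate, without writing back, the data cache lines covering
 * [start, end).  "initda" discards a line on an address match, so any
 * dirty data in the range is lost; only use this on memory that is
 * about to be overwritten (e.g. an inbound DMA buffer).
 */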
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

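/*
 * Invalidate the instruction cache lines covering [start, end), then
 * flush the pipeline ("flushp") so no stale instructions remain in
 * flight.
 */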
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("   flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile__(" flushp\n");
}

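/*
 * Flush every shared userspace mapping of @page in the current mm, so
 * that no virtually aliased cache line is left stale.
 */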
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

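/*
 * The mm-wide hooks below simply flush both caches in full; a safe
 * over-approximation of any per-address-space flush.
 */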
void flush_cache_all(void)
{
	__flush_dcache_all(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

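/*
 * Ranged flush for e.g. munmap(): always flush the dcache, and the
 * icache too when the range may contain code.
 */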
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

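/*
 * Called when the kernel dirties a page-cache page that may also be
 * mapped into userspace.  If the page is in the page cache but has no
 * user mappings, just clear PG_dcache_clean and let update_mmu_cache()
 * do the flush when a mapping is established; otherwise flush the
 * kernel alias now and fix up any existing user aliases.
 */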
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/* Defer the flush if nothing maps the page; otherwise flush now. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		unsigned long start = (unsigned long)page_address(page);

		__flush_dcache_all(start, start + PAGE_SIZE);
		if (mapping)
			flush_aliases(mapping, page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

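/*
 * Called after a PTE has been installed.  If the page was left dirty by
 * flush_dcache_page() (PG_dcache_clean still clear), flush its kernel
 * alias now and knock out any other userspace aliases.
 */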
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *pte)
{
	unsigned long pfn = pte_pfn(*pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	if (!PageReserved(page) &&
	    !test_and_set_bit(PG_dcache_clean, &page->flags)) {
		unsigned long start = page_to_virt(page);
		struct address_space *mapping;

		__flush_dcache(start, start + PAGE_SIZE);

		mapping = page_mapping(page);
		if (mapping)
			flush_aliases(mapping, page);
	}
}

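/*
 * For the user-page copy/clear helpers, flush the user-visible alias
 * (vaddr) before touching the page and the kernel alias afterwards, so
 * both views stay coherent.
 */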
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

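/*
 * Access-process-VM helpers (e.g. ptrace): flush the user mapping of
 * the page, copy through the kernel alias, then write the page's lines
 * back; for executable VMAs also invalidate the icache so changes such
 * as inserted breakpoints become visible to instruction fetch.
 */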
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}