/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

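/*
 * Write back and invalidate the data cache lines covering [start, end).
 * flushda ("flush data cache by address") is tag-checked: a line is only
 * written back and invalidated if it actually holds the addressed data,
 * so the loop only needs to walk the requested range.
 */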
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

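/*
 * Like __flush_dcache(), but using flushd, which flushes the line *indexed*
 * by the address regardless of its tag.  Walking one cache's worth of
 * addresses therefore flushes every line, which is why the range is clamped
 * to cpuinfo.dcache_size.
 */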
static void __flush_dcache_all(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

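/*
 * Invalidate, without write-back, the data cache lines covering
 * [start, end).  initda discards the addressed lines, including any dirty
 * data, so this is only safe on ranges whose memory contents are the
 * authoritative copy (e.g. buffers just written by DMA).
 */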
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

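/*
 * Invalidate the instruction cache lines covering [start, end), clamped to
 * the cache size, then flush the pipeline with flushp so that any stale
 * instructions that were already fetched are discarded.
 */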
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("   flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile__(" flushp\n");
}

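/*
 * With a virtually-indexed cache the same physical page can be cached at
 * several different virtual addresses.  Walk every shared user mapping of
 * @page in the current mm and flush each alias.
 */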
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

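/* Flush the entire D-cache and invalidate the entire I-cache. */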
void flush_cache_all(void)
{
	__flush_dcache_all(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

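/*
 * There is no way to flush only the lines belonging to a single mm, so
 * flush everything.  flush_cache_dup_mm() (called when an mm is duplicated
 * at fork time) has the same constraint.
 */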
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

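/*
 * Make instructions stored to [start, end) visible to instruction fetch,
 * e.g. after loading code into memory.
 */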
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_icache(start, end);
}

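/*
 * flush_dcache_range() and invalidate_dcache_range() are exported so that
 * drivers which maintain coherency by hand (e.g. around DMA transfers) can
 * use them.
 */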
void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

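/*
 * Flush a user address range.  If the VMA is executable, or unknown, the
 * I-cache is flushed as well so that stale instructions are dropped.
 */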
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

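/* Invalidate the I-cache for one page through its kernel mapping. */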
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_icache(start, end);
}

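/*
 * Flush a single page at a user virtual address; this is also the helper
 * flush_aliases() uses for each alias it finds.
 */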
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

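/*
 * Deferred D-cache flushing, as on other architectures with aliasing
 * caches: if the page is currently only mapped through the page cache and
 * not into any userspace address space, just clear PG_dcache_clean and let
 * update_mmu_cache() do the flush when a user mapping is established.
 * Otherwise flush the kernel mapping now, flush any user aliases, and mark
 * the page clean.
 */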
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/*
	 * If the page is not mapped into userspace, defer the flush:
	 * just mark the page dirty.  Otherwise flush it, and its user
	 * aliases, now.
	 */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		unsigned long start = (unsigned long)page_address(page);

		__flush_dcache_all(start, start + PAGE_SIZE);
		if (mapping)
			flush_aliases(mapping, page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

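/*
 * Called after a PTE has been installed for @address.  This performs the
 * flush deferred by flush_dcache_page(): the first time a page whose
 * PG_dcache_clean bit is clear gets mapped, write back its kernel mapping
 * and flush any user aliases.
 */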
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *pte)
{
	unsigned long pfn = pte_pfn(*pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	if (!PageReserved(page) &&
	     !test_and_set_bit(PG_dcache_clean, &page->flags)) {
		unsigned long start = page_to_virt(page);
		struct address_space *mapping;

		__flush_dcache(start, start + PAGE_SIZE);

		mapping = page_mapping(page);
		if (mapping)
			flush_aliases(mapping, page);
	}
}

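/*
 * copy_user_page()/clear_user_page(): flush the user alias at @vaddr so no
 * stale dirty lines get written back over the new data, operate on the
 * kernel mapping, then flush the kernel lines so the result reaches memory
 * before the user alias is reloaded.
 */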
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

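/*
 * Read from a page that may also be mapped in userspace (used by e.g.
 * access_process_vm()): flush the user page first so memory is current,
 * copy, then write back the source lines (and invalidate the I-cache for
 * executable mappings).
 */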
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

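/*
 * Write into a page that is also mapped in userspace (e.g. ptrace poking a
 * breakpoint): flush the user page, copy, write back the destination lines,
 * and invalidate the I-cache for executable mappings so that the new
 * instructions get fetched.
 */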
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}
269