xref: /openbmc/linux/arch/mips/mm/cache.c (revision 64c70b1c)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/*
 * Cache operations.  These are function pointers rather than direct
 * calls: cpu_cache_init() below runs the cache_init() routine for the
 * detected CPU, which installs the appropriate implementations.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);
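
/*
 * These hooks do the actual writeback/invalidate work for platforms
 * where DMA is not cache-coherent, keeping CPU caches and device DMA
 * in agreement; like the operations above, they are installed by the
 * CPU-specific cache_init() code (e.g. r4k_cache_init()).
 */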

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * The cacheflush(2) system call.  We could optimize the case where the
 * cache argument is not BCACHE, but that seems to be a very atypical
 * use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
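
/*
 * Illustrative userspace usage, a sketch assuming the libc wrapper and
 * the ICACHE/DCACHE/BCACHE flags from <asm/cachectl.h>: a JIT that has
 * just written instructions into a buffer would call
 *
 *	cacheflush(code_buf, code_len, BCACHE);
 *
 * (code_buf/code_len are hypothetical names) before jumping into the
 * buffer, so the new instructions are written back from the D-cache
 * and stale I-cache lines are invalidated.
 */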

/*
 * Flush a page from the data cache, or mark it dirty so the flush can
 * be deferred.  Highmem pages have no permanent kernel address and are
 * skipped; pages belonging to a mapping that nobody has mapped yet can
 * safely be flushed later, when they are actually mapped.
 */
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
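
/*
 * The deferred case above is picked up in __update_cache() below: pages
 * still marked dcache-dirty get their flush when they are first faulted
 * into a user address space.
 */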

EXPORT_SYMBOL(__flush_dcache_page);

/*
 * Flush an anonymous page before the kernel reads it through its own
 * mapping.  If the kernel address of the page and the user address
 * (vmaddr) alias in the cache, flush through a temporary mapping from
 * kmap_coherent() so that the lines dirtied via the user mapping are
 * the ones written back.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
		void *kaddr;

		kaddr = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)kaddr);
		kunmap_coherent();
	}
}
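
/*
 * This is reached via the generic flush_anon_page() wrapper, which
 * checks PageAnon() first, e.g. when get_user_pages() hands a user's
 * page to the kernel.  pages_do_alias() makes the whole thing a no-op
 * on configurations where kernel and user addresses cannot alias.
 */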

EXPORT_SYMBOL(__flush_anon_page);

/*
 * Complete any deferred D-cache flush for a page that is being mapped
 * into userspace.  For executable mappings on CPUs whose I-cache does
 * not snoop the D-cache, the page must be written back even when the
 * addresses do not alias.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}
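
/*
 * This runs from the arch's update_mmu_cache() hook, i.e. right after
 * a PTE mapping the page into a user address space has been installed.
 */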

static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";

/*
 * Probe the CPU's cache variant and run the matching cache_init()
 * routine, which installs the function pointers declared at the top of
 * this file.
 */
void __init cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
		return;
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
		return;
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
		return;
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
		return;
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
		return;
	}
	if (cpu_has_sb1_cache) {
		extern void __weak sb1_cache_init(void);

		sb1_cache_init();
		return;
	}

	panic(cache_panic);
}
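
/*
 * A note on the __weak extern declarations above, assuming (as the
 * build configuration enforces) that a cpu_has_*_cache predicate is
 * only true when the matching cache support is compiled in: __weak
 * keeps this file linking when the other cache_init() implementations
 * are absent, and since the predicates are usually compile-time
 * constants from cpu-features.h, the untaken branches (and their
 * unresolved calls) are optimized away.
 */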

/*
 * Default policy for when memory must be accessed uncached: O_SYNC
 * mappings, and any address beyond high_memory.  Declared __weak so a
 * platform can override it.
 */
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_SYNC)
		return 1;

	return addr >= __pa(high_memory);
}
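
/*
 * A platform override (same signature, minus the __weak attribute)
 * would typically widen this policy, e.g. forcing uncached access to
 * device regions that happen to sit below high_memory.
 */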
177