xref: /openbmc/linux/arch/mips/mm/cache.c (revision df5062f0)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/bcache.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

/* Cache operations. */
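/*
 * These pointers are installed at boot by the CPU-family specific cache
 * code (R3000-, R4000- or Octeon-style) selected in cpu_cache_init()
 * below.
 */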
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

/*
 * Dummy cache handling routine
 */

void cache_noop(void) {}

#ifdef CONFIG_BOARD_SCACHE

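/*
 * Default board (secondary) cache handlers: every hook is cache_noop, so
 * calling through bcops is always safe; platforms with a real external
 * cache install their own bcache_ops at init time.
 */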
static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#endif

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
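/*
 * Used by the DMA mapping code on non-coherent systems to write back
 * and/or invalidate buffers around device DMA.
 */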
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
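/*
 * cacheflush(2): make instructions the caller has just written in
 * [addr, addr + bytes) visible to instruction fetch.  Only the I-cache
 * range is flushed here; the cache argument is not inspected.
 */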
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}

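/*
 * Back end for flush_dcache_page()/flush_dcache_folio() on aliasing
 * D-caches.  If the folio belongs to a mapping that is not (yet) mapped
 * into user space, the flush is deferred by marking the folio dcache
 * dirty; __update_cache() completes it once a user translation exists.
 */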
void __flush_dcache_pages(struct page *page, unsigned int nr)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio_flush_mapping(folio);
	unsigned long addr;
	unsigned int i;

	if (mapping && !mapping_mapped(mapping)) {
		folio_set_dcache_dirty(folio);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	for (i = 0; i < nr; i++) {
		addr = (unsigned long)kmap_local_page(nth_page(page, i));
		flush_data_cache_page(addr);
		kunmap_local((void *)addr);
	}
}
EXPORT_SYMBOL(__flush_dcache_pages);

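/*
 * Keep the kernel view of an anonymous page coherent with the user
 * mapping at @vmaddr.  Nothing needs doing unless the two addresses fall
 * into different cache colours (pages_do_alias()).  If the folio is
 * mapped and not marked dcache dirty, flush through a temporary mapping
 * at the user colour; otherwise flush the kernel address directly.
 */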
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);
	struct folio *folio = page_folio(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

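/*
 * Called when a translation for @address is installed.  Complete any
 * D-cache flushing deferred by __flush_dcache_pages(): flush each page of
 * the folio that is executable (unless the I-cache fills straight from
 * the D-cache) or whose kernel address aliases the user address.
 */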
void __update_cache(unsigned long address, pte_t pte)
{
	struct folio *folio;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
	unsigned int i;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	folio = page_folio(pfn_to_page(pfn));
	address &= PAGE_MASK;
	address -= offset_in_folio(folio, pfn << PAGE_SHIFT);

	if (folio_test_dcache_dirty(folio)) {
		for (i = 0; i < folio_nr_pages(folio); i++) {
			addr = (unsigned long)kmap_local_folio(folio, i);

			if (exec || pages_do_alias(addr, address))
				flush_data_cache_page(addr);
			kunmap_local((void *)addr);
			address += PAGE_SIZE;
		}
		folio_clear_dcache_dirty(folio);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p)	__pgprot(_page_cachable_default | (p))

static pgprot_t protection_map[16] __ro_after_init;
DECLARE_VM_GET_PAGE_PROT

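/*
 * Build the 16-entry protection map consumed by vm_get_page_prot().  The
 * index is the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combination (entries
 * 0-7 private, 8-15 shared) and PM() folds in the boot-time default
 * cacheability bits.  Private writable entries omit _PAGE_WRITE so the
 * first store faults and triggers copy-on-write.
 */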
static inline void setup_protection_map(void)
{
	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4]  = PM(_PAGE_PRESENT);
	protection_map[5]  = PM(_PAGE_PRESENT);
	protection_map[6]  = PM(_PAGE_PRESENT);
	protection_map[7]  = PM(_PAGE_PRESENT);

	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				_PAGE_NO_READ);
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PM(_PAGE_PRESENT);
	protection_map[13] = PM(_PAGE_PRESENT);
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}

#undef PM

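/*
 * Boot-time entry point: pick the cache implementation matching the CPU
 * family, then build the protection map so that PM() sees the
 * _page_cachable_default value established by that implementation.
 */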
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}