xref: /openbmc/linux/arch/sh/mm/cache.c (revision a58e1a2a)
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

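/*
 * Per-CPU-family cache operations.  Each pointer defaults to the no-op
 * implementation below and is replaced at boot by the family-specific
 * cache init (sh2_cache_init(), sh2a_cache_init(), sh4_cache_init())
 * invoked from cpu_cache_init().
 */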
void (*flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_dup_mm)(struct mm_struct *mm);
void (*flush_cache_page)(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn);
void (*flush_cache_range)(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end);
void (*flush_dcache_page)(struct page *page);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma,
				 struct page *page);
void (*flush_cache_sigtramp)(unsigned long address);
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

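/*
 * No-op stubs: a safe default for parts with no cache (or with caching
 * disabled) until one of the cache_init() routines installs real handlers.
 */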
static inline void noop_flush_cache_all(void)
{
}

static inline void noop_flush_cache_mm(struct mm_struct *mm)
{
}

static inline void noop_flush_cache_page(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
}

static inline void noop_flush_cache_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void noop_flush_dcache_page(struct page *page)
{
}

static inline void noop_flush_icache_range(unsigned long start,
					   unsigned long end)
{
}

static inline void noop_flush_icache_page(struct vm_area_struct *vma,
					  struct page *page)
{
}

static inline void noop_flush_cache_sigtramp(unsigned long address)
{
}

static inline void noop__flush_region(void *start, int size)
{
}

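/*
 * Write into a page that may also be mapped in user space.  When the
 * D-cache has aliases and the page is mapped with a clean kernel-side
 * view, copy through a mapping congruent with the user address
 * (kmap_coherent()) so both views stay coherent; otherwise copy through
 * the regular kernel mapping and mark the page PG_dcache_dirty so the
 * stale alias is written back later (see __update_cache()).  Executable
 * mappings additionally get the affected cache lines flushed.
 */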
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

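/*
 * Read from a page that may also be mapped in user space.  With D-cache
 * aliases, go through a congruent mapping so we observe the user's most
 * recent writes; otherwise read through the regular kernel mapping.
 */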
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

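/*
 * Copy a user page (e.g. on a COW fault).  The source is read through a
 * congruent mapping when it may carry aliased lines; the freshly written
 * destination is written back if its kernel address aliases the user
 * virtual address, so user space sees the copied data.
 */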
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible on other CPUs too before the page is used */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

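/*
 * Zero a user page.  If the kernel mapping used for clearing aliases the
 * user virtual address, write the lines back so the user mapping does not
 * see stale data.
 */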
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

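/*
 * Called when a PTE is (re)installed: if the page was written through a
 * non-congruent kernel mapping earlier (PG_dcache_dirty), write the kernel
 * alias back so the new user mapping sees the data.
 */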
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	/* Only translate the pfn once we know it is valid. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}

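/*
 * Resolve D-cache aliasing for an anonymous page before the kernel
 * accesses it on behalf of user space: write back via a congruent mapping
 * when the page is mapped and clean, otherwise via its kernel address.
 */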
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region(kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}

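/*
 * Derive the aliasing parameters of a cache: alias_mask covers the index
 * bits that lie above PAGE_SHIFT, and n_aliases is the number of distinct
 * page colours (0 when the cache cannot alias).
 */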
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

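/* Report the probed cache geometry in the boot log. */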
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

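/*
 * Boot-time cache setup: compute aliasing parameters for each probed
 * cache, install the no-op defaults, then hand over to the init routine
 * for the detected CPU family.  The __weak declarations keep the link
 * happy when a family's cache code is not built in.
 */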
void __init cpu_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	flush_cache_all		= noop_flush_cache_all;
	flush_cache_mm		= noop_flush_cache_mm;
	flush_cache_dup_mm	= noop_flush_cache_mm;
	flush_cache_page	= noop_flush_cache_page;
	flush_cache_range	= noop_flush_cache_range;
	flush_dcache_page	= noop_flush_dcache_page;
	flush_icache_range	= noop_flush_icache_range;
	flush_icache_page	= noop_flush_icache_page;
	flush_cache_sigtramp	= noop_flush_cache_sigtramp;

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	emit_cache_params();
}