xref: /openbmc/linux/arch/sh/mm/cache.c (revision 27d59ec1)
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

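/*
 * Write into a user page that the kernel also has mapped.  On CPUs with
 * dcache aliases, copy through a coherent kernel mapping of the user
 * address when the page is mapped and clean, so both views stay in sync;
 * otherwise copy through the kernel address and set PG_dcache_dirty so
 * the flush is deferred until the PTE is (re)installed.  Executable
 * mappings additionally get a cache flush for the page.
 */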
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

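/*
 * Read from a user page on behalf of the kernel (e.g. access_process_vm()
 * for ptrace).  As above, read through a coherent mapping when the page
 * is mapped and clean, so a stale kernel-side alias is not seen.
 */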
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

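/*
 * Copy a user page (e.g. for copy-on-write).  The source is read through
 * a coherent mapping when it is mapped in user space and not marked
 * PG_dcache_dirty; otherwise the regular kernel mapping is used.  The
 * destination's kernel mapping is written back if it aliases the user
 * address the page will be mapped at.
 */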
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible to other CPUs before the page is used */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

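/*
 * Zero a page destined for user space.  If the kernel mapping used for
 * clearing aliases with the user address, write it back so the user
 * mapping sees the zeroed data.
 */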
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

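/*
 * Called when a PTE is installed (via update_mmu_cache()).  If a deferred
 * write (PG_dcache_dirty) is pending on the page and its kernel mapping
 * aliases the new user address, write the kernel-side lines back now.
 */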
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}

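/*
 * Flush an anonymous page whose kernel mapping aliases the given user
 * address, either through a coherent mapping (if the page is mapped and
 * clean) or directly through its kernel address.
 */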
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}

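/*
 * Derive the alias parameters from the probed cache geometry: the alias
 * mask covers the index bits above the page offset, and n_aliases is the
 * number of page colours.  For example, a 16KB direct-mapped cache with
 * 32-byte lines and 4KB pages has 512 sets and entry_shift = 5, giving
 * alias_mask = ((512 - 1) << 5) & ~0xfff = 0x3000 and n_aliases = 4.
 */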
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

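/* Report the probed I/D (and, if present, secondary) cache geometry at boot. */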
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

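/*
 * Compute the alias parameters for each probed cache and hand off to the
 * SH-4 specific cache initialization when running on an SH-4 family part.
 */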
void __init cpu_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	emit_cache_params();
}