xref: /openbmc/linux/arch/sh/mm/cache.c (revision cbbe2f68)
1cbbe2f68SPaul Mundt /*
2cbbe2f68SPaul Mundt  * arch/sh/mm/cache.c
3cbbe2f68SPaul Mundt  *
4cbbe2f68SPaul Mundt  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
5cbbe2f68SPaul Mundt  * Copyright (C) 2002 - 2009  Paul Mundt
6cbbe2f68SPaul Mundt  *
7cbbe2f68SPaul Mundt  * Released under the terms of the GNU GPL v2.0.
8cbbe2f68SPaul Mundt  */
9cbbe2f68SPaul Mundt #include <linux/mm.h>
10cbbe2f68SPaul Mundt #include <linux/init.h>
11cbbe2f68SPaul Mundt #include <linux/mutex.h>
12cbbe2f68SPaul Mundt #include <linux/fs.h>
13cbbe2f68SPaul Mundt #include <linux/highmem.h>
14cbbe2f68SPaul Mundt #include <linux/module.h>
15cbbe2f68SPaul Mundt #include <asm/mmu_context.h>
16cbbe2f68SPaul Mundt #include <asm/cacheflush.h>
17cbbe2f68SPaul Mundt 
18cbbe2f68SPaul Mundt void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
19cbbe2f68SPaul Mundt 		       unsigned long vaddr, void *dst, const void *src,
20cbbe2f68SPaul Mundt 		       unsigned long len)
21cbbe2f68SPaul Mundt {
22cbbe2f68SPaul Mundt 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
23cbbe2f68SPaul Mundt 	    !test_bit(PG_dcache_dirty, &page->flags)) {
24cbbe2f68SPaul Mundt 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
25cbbe2f68SPaul Mundt 		memcpy(vto, src, len);
26cbbe2f68SPaul Mundt 		kunmap_coherent();
27cbbe2f68SPaul Mundt 	} else {
28cbbe2f68SPaul Mundt 		memcpy(dst, src, len);
29cbbe2f68SPaul Mundt 		if (boot_cpu_data.dcache.n_aliases)
30cbbe2f68SPaul Mundt 			set_bit(PG_dcache_dirty, &page->flags);
31cbbe2f68SPaul Mundt 	}
32cbbe2f68SPaul Mundt 
33cbbe2f68SPaul Mundt 	if (vma->vm_flags & VM_EXEC)
34cbbe2f68SPaul Mundt 		flush_cache_page(vma, vaddr, page_to_pfn(page));
35cbbe2f68SPaul Mundt }
36cbbe2f68SPaul Mundt 
/*
 * Copy data out of a user page (e.g. for ptrace reads), honouring
 * D-cache aliases.
 *
 * When the page is mapped in userspace and not marked PG_dcache_dirty,
 * read through a coherent kernel mapping so we see the data the user
 * mapping sees.  Otherwise read via the plain kernel mapping; in that
 * case the page is flagged dirty because the kernel-side cache lines we
 * just touched may alias the user mapping.
 * NOTE(review): marking dirty on a pure read looks odd but appears
 * intentional — the fallback memcpy populates kernel-alias cache lines;
 * confirm against the SH cache maintenance design.
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		/* Coherent alias: offset into the page by vaddr's page offset. */
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
52cbbe2f68SPaul Mundt 
/*
 * Copy a (possibly highmem) user page, preserving D-cache coherency on
 * aliasing caches.
 *
 * The source is read through a coherent mapping when it is user-mapped
 * and not marked PG_dcache_dirty; otherwise through a plain atomic
 * kmap.  The destination is written through an atomic kmap and, if that
 * kernel address aliases the user address, written back before the
 * mapping is dropped so the user alias sees the copied data.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		/* Source may be live in a user mapping: read via coherent alias. */
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	/* Flush before dropping the mapping so the user alias is coherent. */
	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copied data is visible to other CPUs before use. */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
79cbbe2f68SPaul Mundt 
80cbbe2f68SPaul Mundt void clear_user_highpage(struct page *page, unsigned long vaddr)
81cbbe2f68SPaul Mundt {
82cbbe2f68SPaul Mundt 	void *kaddr = kmap_atomic(page, KM_USER0);
83cbbe2f68SPaul Mundt 
84cbbe2f68SPaul Mundt 	clear_page(kaddr);
85cbbe2f68SPaul Mundt 
86cbbe2f68SPaul Mundt 	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
87cbbe2f68SPaul Mundt 		__flush_wback_region(kaddr, PAGE_SIZE);
88cbbe2f68SPaul Mundt 
89cbbe2f68SPaul Mundt 	kunmap_atomic(kaddr, KM_USER0);
90cbbe2f68SPaul Mundt }
91cbbe2f68SPaul Mundt EXPORT_SYMBOL(clear_user_highpage);
92cbbe2f68SPaul Mundt 
93cbbe2f68SPaul Mundt void __update_cache(struct vm_area_struct *vma,
94cbbe2f68SPaul Mundt 		    unsigned long address, pte_t pte)
95cbbe2f68SPaul Mundt {
96cbbe2f68SPaul Mundt 	struct page *page;
97cbbe2f68SPaul Mundt 	unsigned long pfn = pte_pfn(pte);
98cbbe2f68SPaul Mundt 
99cbbe2f68SPaul Mundt 	if (!boot_cpu_data.dcache.n_aliases)
100cbbe2f68SPaul Mundt 		return;
101cbbe2f68SPaul Mundt 
102cbbe2f68SPaul Mundt 	page = pfn_to_page(pfn);
103cbbe2f68SPaul Mundt 	if (pfn_valid(pfn) && page_mapping(page)) {
104cbbe2f68SPaul Mundt 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
105cbbe2f68SPaul Mundt 		if (dirty) {
106cbbe2f68SPaul Mundt 			unsigned long addr = (unsigned long)page_address(page);
107cbbe2f68SPaul Mundt 
108cbbe2f68SPaul Mundt 			if (pages_do_alias(addr, address & PAGE_MASK))
109cbbe2f68SPaul Mundt 				__flush_wback_region((void *)addr, PAGE_SIZE);
110cbbe2f68SPaul Mundt 		}
111cbbe2f68SPaul Mundt 	}
112cbbe2f68SPaul Mundt }
113cbbe2f68SPaul Mundt 
114cbbe2f68SPaul Mundt void __flush_anon_page(struct page *page, unsigned long vmaddr)
115cbbe2f68SPaul Mundt {
116cbbe2f68SPaul Mundt 	unsigned long addr = (unsigned long) page_address(page);
117cbbe2f68SPaul Mundt 
118cbbe2f68SPaul Mundt 	if (pages_do_alias(addr, vmaddr)) {
119cbbe2f68SPaul Mundt 		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
120cbbe2f68SPaul Mundt 		    !test_bit(PG_dcache_dirty, &page->flags)) {
121cbbe2f68SPaul Mundt 			void *kaddr;
122cbbe2f68SPaul Mundt 
123cbbe2f68SPaul Mundt 			kaddr = kmap_coherent(page, vmaddr);
124cbbe2f68SPaul Mundt 			__flush_wback_region((void *)kaddr, PAGE_SIZE);
125cbbe2f68SPaul Mundt 			kunmap_coherent();
126cbbe2f68SPaul Mundt 		} else
127cbbe2f68SPaul Mundt 			__flush_wback_region((void *)addr, PAGE_SIZE);
128cbbe2f68SPaul Mundt 	}
129cbbe2f68SPaul Mundt }
130