xref: /openbmc/linux/arch/arm64/mm/flush.c (revision ca79522c)
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
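
/*
 * flush_cache_range() is called by the core MM when an existing user
 * mapping changes, e.g. on the mprotect()/mremap() paths. The arm64
 * D-cache behaves as PIPT and needs no maintenance here; only the
 * I-cache matters, and only for executable mappings. A hypothetical
 * call site, sketched for illustration only:
 *
 *	flush_cache_range(vma, vma->vm_start, vma->vm_end);
 */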

static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_aliasing()) {
			__flush_dcache_area(kaddr, len);
			__flush_icache_all();
		} else {
			flush_icache_range(addr, addr + len);
		}
	}
}
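
/*
 * The branch above follows the I-cache type reported by CTR_EL0: a
 * non-aliasing (PIPT) I-cache can be invalidated by address with
 * flush_icache_range(), whereas an aliasing I-cache may hold stale
 * lines under other virtual addresses, so the D-cache is cleaned to
 * the point of unification and the whole I-cache is invalidated.
 */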

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user space"
 * model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
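
/*
 * copy_to_user_page() is used by the generic ptrace/uaccess machinery
 * (e.g. __access_remote_vm()) when the kernel writes into another
 * process's address space through a kernel mapping of the page.
 * A hypothetical caller, sketched for illustration only:
 *
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, kaddr + offset, buf, len);
 *	kunmap(page);
 *
 * Preemption is disabled so that the cache maintenance in
 * flush_ptrace_access() runs on the same CPU that performed the copy.
 */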

void __flush_dcache_page(struct page *page)
{
	__flush_dcache_area(page_address(page), PAGE_SIZE);
}
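
/*
 * Cleaning the kernel linear alias obtained from page_address() above
 * is sufficient for every mapping of the page, since the arm64 D-cache
 * behaves as PIPT; no per-alias user-address flushing is needed.
 */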

void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	unsigned long pfn;
	struct page *page;

	pfn = pte_pfn(pte);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
		__flush_dcache_page(page);
		__flush_icache_all();
	} else if (icache_is_aivivt()) {
		__flush_icache_all();
	}
}
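
/*
 * __sync_icache_dcache() is reached from set_pte_at() when a user
 * executable mapping is installed. PG_dcache_clean makes the work
 * lazy: the first mapping of a page pays for the D-cache clean and
 * I-cache invalidation, and later mappings skip it, except on an
 * ASID-tagged VIVT (AIVIVT) I-cache, which must always be invalidated
 * because lines tagged with another ASID may still alias.
 */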

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty cache
	 * lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping_mapped(mapping)) {
		__flush_dcache_page(page);
		__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	} else {
		clear_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
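
/*
 * A typical user of flush_dcache_page() is a driver that has written
 * to a page cache page through its kernel mapping, for instance after
 * a PIO transfer. Illustrative sketch only:
 *
 *	memcpy(page_address(page), data, len);
 *	flush_dcache_page(page);
 *
 * If the page is not currently mapped into userspace, the flush is
 * deferred by clearing PG_dcache_clean and performed later by
 * __sync_icache_dcache() when a user mapping is installed.
 */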

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);