// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

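/*
 * "Heavy" memory barrier for platforms where a dsb() alone is not
 * enough: synchronise any outer cache (e.g. PL310/L2C-310) and then
 * invoke the SoC-specific barrier hook, if one was registered.
 */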
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

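/*
 * Map the page at @pfn into the kernel's flush alias window at the
 * same cache colour as the user address @vaddr, then clean+invalidate
 * that one-page alias from the D-cache: MCRR p15, 0, <end>, <start>, c14
 * cleans and invalidates the range, and MCR p15, 0, 0, c7, c10, 4
 * drains the write buffer.
 */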
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

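/*
 * Same trick for the I-cache: map an alias of the page at the colour
 * of @vaddr and flush the instruction range through that alias.
 */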
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

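/*
 * VIVT caches must flush every user mapping belonging to @mm.  An
 * aliasing VIPT D-cache cannot be flushed per-mm, so fall back to a
 * full clean+invalidate (MCR c7, c14, 0) followed by a write buffer
 * drain (MCR c7, c10, 4).
 */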
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

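/*
 * Same policy as flush_cache_mm(): VIVT flushes the range directly,
 * aliasing VIPT falls back to a full D-cache clean+invalidate, and an
 * executable range additionally invalidates the whole I-cache.
 */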
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

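/*
 * Flush the user mapping of @nr pages starting at @pfn.  Aliasing
 * VIPT caches flush through a colour-matched kernel alias; an
 * ASID-tagged VIVT I-cache cannot be invalidated by address, so
 * executable mappings invalidate the whole I-cache instead.
 */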
void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_pages(vma, user_addr, pfn, nr);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

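/* IPI handler: invalidate the I-cache on another CPU. */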
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

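/*
 * Make a kernel-side copy of user text/data at @kaddr coherent with
 * the user mapping at @uaddr.  VIVT caches only need work when this
 * CPU has the task's mm live (FLAG_PA_CORE_IN_MM); aliasing VIPT
 * flushes through a colour-matched alias; a non-aliasing VIPT D-cache
 * only needs the I-side handled for executable pages (FLAG_PA_IS_EXEC).
 */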
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

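/*
 * Flush an instruction that uprobes copied into its XOL (execute out
 * of line) slot.  This runs on the CPU that will execute the slot and
 * the mapping is executable by construction, hence both flags are set
 * unconditionally.
 */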
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!folio_test_highmem(folio)) {
		__cpuc_flush_dcache_area(folio_address(folio),
					 folio_size(folio));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_local_folio(folio,
							      i * PAGE_SIZE);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_local(addr);
			}
		} else {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_high_get(folio_page(folio, i));
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(folio_page(folio, i));
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	pgoff_t pgoff, pgoff_end;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = folio->index;
	pgoff_end = pgoff + folio_nr_pages(folio) - 1;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
		unsigned long start, offset, pfn;
		unsigned int nr;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start;
		pfn = folio_pfn(folio);
		nr = folio_nr_pages(folio);
		offset = pgoff - vma->vm_pgoff;
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			start += offset * PAGE_SIZE;
		}
		if (start + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - start) / PAGE_SIZE;

		flush_cache_pages(vma, start, pfn, nr);
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
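/*
 * Called when a possibly-executable user PTE is installed: write back
 * any dirty D-cache lines for the page so the I-cache sees the right
 * instructions before userspace can execute from the mapping.
 */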
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct folio *folio;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (folio_test_reserved(folio))
		return;

	if (cache_is_vipt_aliasing())
		mapping = folio_flush_mapping(folio);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &folio->flags))
			clear_bit(PG_dcache_clean, &folio->flags);
		return;
	}

	mapping = folio_flush_mapping(folio);

	if (!cache_ops_need_broadcast() &&
	    mapping && !folio_mapped(folio))
		clear_bit(PG_dcache_clean, &folio->flags);
	else {
		__flush_dcache_folio(mapping, folio);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, folio);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

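/* Legacy page-based entry point; all the real work is per-folio. */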
void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}