/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

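/*
 * Write back and invalidate the data cache lines covering [start, end).
 * The range is first aligned to cache-line boundaries, then clamped to
 * dcache_size bytes: stepping flushd over one cache-sized window of
 * addresses already touches every line, so flushing more would only
 * revisit lines.
 */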
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

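/*
 * Invalidate, without writing back, the data cache lines covering
 * [start, end).  Unlike __flush_dcache(), the range is not clamped to
 * the cache size, so every line in the range is visited.
 */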
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

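/*
 * Invalidate the instruction cache lines covering [start, end), clamped
 * to icache_size bytes as in __flush_dcache(), then issue flushp to
 * discard any instructions already fetched into the pipeline.
 */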
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("   flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile(" flushp\n");
}

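/*
 * Flush every user-space alias of @folio in the current mm: walk the
 * shared (VM_MAYSHARE) mappings of the folio's range in @mapping and
 * flush the cache over each one.
 */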
static void flush_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	unsigned long flags;
	pgoff_t pgoff;
	unsigned long nr = folio_nr_pages(folio);

	pgoff = folio->index;

	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long start;

		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		flush_cache_range(vma, start, start + nr * PAGE_SIZE);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}

void flush_cache_all(void)
{
	__flush_dcache(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

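/*
 * There is no cheap way to restrict a flush to the lines belonging to a
 * single address space, so the mm-wide hooks simply flush everything.
 */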
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

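/*
 * Make the range [start, end) coherent between the data and instruction
 * caches: write back the dcache first so freshly written code reaches
 * memory, then invalidate the icache over the same range.  A typical
 * caller has just stored instructions through the kernel mapping, e.g.:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */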
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

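/*
 * Make @nr contiguous pages coherent for execution via their kernel
 * mapping, as for flush_icache_range().
 */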
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + nr * PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

static void __flush_dcache_folio(struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * folio.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)folio_address(folio);

	__flush_dcache(start, start + folio_size(folio));
}

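/*
 * If the folio belongs to a mapping that currently has no user-space
 * mappings, defer the flush: clear PG_dcache_clean and let
 * update_mmu_cache_range() do the work when the folio is next mapped.
 * Otherwise flush the kernel mapping now, plus any user aliases.
 */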
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	mapping = folio_flush_mapping(folio);

	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &folio->flags);
	} else {
		__flush_dcache_folio(folio);
		if (mapping) {
			unsigned long start = (unsigned long)folio_address(folio);
			flush_aliases(mapping, folio);
			flush_icache_range(start, start + folio_size(folio));
		}
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

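/*
 * Called when a PTE covering @nr pages is installed: reload the TLB
 * entry, perform any flush deferred by flush_dcache_folio(), and bring
 * user aliases (and the icache, for executable mappings) up to date.
 */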
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	struct address_space *mapping;

	reload_tlb_page(vma, address, pte);

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(folio);

	mapping = folio_flush_mapping(folio);
	if (mapping) {
		flush_aliases(mapping, folio);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_pages(vma, &folio->page,
					folio_nr_pages(folio));
	}
}

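/*
 * The page copy/clear helpers below flush both caches around the
 * operation so that neither the user-space alias at @vaddr nor the
 * kernel mapping holds stale lines afterwards.
 */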
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long user_vaddr,
			 void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long user_vaddr,
		       void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}