xref: /openbmc/linux/arch/csky/abiv2/cacheflush.c (revision ffcdf473)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

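/*
 * update_mmu_cache() is the arch hook the generic MM code invokes after a
 * PTE has been installed for @address.  It brings the caches in sync with
 * the new mapping: the first time a page is mapped, its dcache lines are
 * written back (and its icache lines invalidated for executable mappings),
 * tracked lazily via PG_dcache_clean.
 */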
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;

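	/* Drop any stale TLB entry for @address so the new PTE takes effect. */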
	flush_tlb_page(vma, address);

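	/* Nothing to do for PTEs without a valid struct page (e.g. I/O mappings). */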
	if (!pfn_valid(pte_pfn(*pte)))
		return;

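	/* The global zero page is never written, so it never goes stale. */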
	page = pfn_to_page(pte_pfn(*pte));
	if (page == ZERO_PAGE(0))
		return;

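	/*
	 * Flush lazily: if PG_dcache_clean was already set, a previous
	 * mapping has done the work; test_and_set_bit() ensures only one
	 * CPU flushes the page.
	 */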
	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

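	/* Map the (possibly highmem) page to get a kernel virtual address. */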
	addr = (unsigned long) kmap_atomic(page);

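	/* Write the page's dcache lines back to memory. */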
	dcache_wb_range(addr, addr + PAGE_SIZE);

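	/*
	 * Executable mappings also need stale icache lines dropped so the
	 * CPU fetches the freshly written instructions.
	 */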
	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}

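/*
 * Perform a deferred icache invalidation.  flush_icache_mm_range() leaves
 * CPUs it did not IPI marked in mm->context.icache_stale_mask; this hook
 * runs on the context-switch path so such a CPU catches up before it
 * executes user code from @mm again.
 */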
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote CPU's writes are visible to this CPU.
		 * This pairs with a barrier in flush_icache_mm_range().
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

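/*
 * Make the icache coherent for [start, end) on every CPU that may run
 * @mm: the local CPU is flushed immediately, CPUs currently running @mm
 * are flushed by IPI, and any CPU left marked in icache_stale_mask will
 * catch up in flush_icache_deferred().
 */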
void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

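	/*
	 * Fast path: cores that can invalidate icache lines by virtual
	 * address handle the current mm directly by range.
	 */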
#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every CPU's icache as needing a flush for this mm. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this CPU's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other CPUs concurrently executing this mm, and
	 * mark them as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

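	/*
	 * If @mm is live on other CPUs (or is not the mm running here),
	 * flush them now and clear the stale mask; otherwise leave the
	 * mask set so those CPUs invalidate lazily on their next switch
	 * to @mm.
	 */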
	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}