xref: /openbmc/linux/arch/csky/abiv2/cacheflush.c (revision 65844828)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

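/*
 * Called after a PTE has been installed for @address.  Bring the caches back
 * in sync for the newly mapped page: write the D-cache back so memory holds
 * the latest data, and invalidate the I-cache when the mapping is executable.
 * PG_dcache_clean records that the page has already been synchronized, so
 * later faults on the same page can return early.
 */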
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;

	if (!pfn_valid(pte_pfn(*pte)))
		return;

	page = pfn_to_page(pte_pfn(*pte));
	if (page == ZERO_PAGE(0))
		return;

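	/*
	 * test_and_set_bit() both checks and records PG_dcache_clean: if the
	 * bit was already set, the caches were synchronized for this page
	 * earlier and nothing more is needed.
	 */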
	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

	addr = (unsigned long) kmap_atomic(page);

	dcache_wb_range(addr, addr + PAGE_SIZE);

	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}

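/*
 * Catch up on an I-cache invalidation that flush_icache_mm_range() deferred
 * for @mm: if the local CPU is marked stale in icache_stale_mask, clear the
 * mark and invalidate the local I-cache.
 */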
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote CPU's writes are visible to this CPU.
		 * This pairs with a barrier in flush_icache_mm_range().
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

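/*
 * Make instruction fetches for @mm coherent after code in [start, end) has
 * been modified.  The local CPU invalidates its I-cache immediately; other
 * CPUs are either told to do the same via on_each_cpu_mask() or are marked
 * stale in icache_stale_mask so they invalidate later, in
 * flush_icache_deferred().
 */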
void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

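	/*
	 * Fast path: when the CPU can invalidate the I-cache by address range
	 * and the flush targets the current mm, invalidate just [start, end)
	 * and return.
	 */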
#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every CPU's I-cache as needing a flush for this mm. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this CPU's I-cache now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I-cache of other CPUs concurrently running this mm, and
	 * mark them as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}