xref: /openbmc/linux/arch/csky/abiv2/cacheflush.c (revision e724e7aa)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

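/*
 * Called by the generic MM code after PTEs for @address have been
 * installed.  Invalidates the stale TLB entry and, the first time a
 * folio's caches are marked clean, writes its data back from the
 * dcache and invalidates the icache for executable mappings.
 */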
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *pte, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*pte);
	struct folio *folio;
	unsigned int i;

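	/* The PTE has just been set up: drop the stale TLB entry. */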
	flush_tlb_page(vma, address);

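	/* PFNs without a struct page (e.g. MMIO) need no cache work. */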
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));

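	/*
	 * test_and_set_bit() atomically claims the flush: the first
	 * caller does the cache maintenance below, later callers see
	 * PG_dcache_clean already set and return early, until the flag
	 * is cleared again.
	 */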
	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
		return;

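	/*
	 * Write the folio's data back page by page through a temporary
	 * kernel mapping; for executable mappings also invalidate the
	 * icache so stale instructions cannot be fetched.
	 */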
	icache_inv_range(address, address + nr * PAGE_SIZE);
	for (i = 0; i < folio_nr_pages(folio); i++) {
		unsigned long addr = (unsigned long) kmap_local_folio(folio,
								i * PAGE_SIZE);

		dcache_wb_range(addr, addr + PAGE_SIZE);
		if (vma->vm_flags & VM_EXEC)
			icache_inv_range(addr, addr + PAGE_SIZE);
		kunmap_local((void *) addr);
	}
}

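/*
 * Perform the icache invalidate that flush_icache_mm_range() deferred
 * for this CPU.  Expected to run when a CPU switches to @mm: if this
 * CPU is marked stale in icache_stale_mask, clear the bit and
 * invalidate the local icache.
 */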
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote CPU's writes are visible to this CPU.
		 * This pairs with a barrier in flush_icache_mm_range().
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

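/*
 * Make the icache coherent with recent code writes in [start, end) of
 * @mm.  With CONFIG_CPU_HAS_ICACHE_INS the current mm is handled by a
 * range invalidate; otherwise this CPU and every CPU currently running
 * @mm invalidate their whole icache now, and all other CPUs are marked
 * stale so they invalidate lazily via flush_icache_deferred().
 */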
void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

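	/*
	 * Fast path: the hardware can invalidate the icache by address
	 * range, so when flushing our own mm, invalidate just
	 * [start, end) and return.
	 */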
#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every CPU's icache as needing a flush for this mm. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this CPU's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other CPUs concurrently executing this mm, and
	 * mark them as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}