// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

/*
 * Called after a PTE for @address has been installed or updated.
 *
 * Drops the (possibly stale) TLB entry for the page, and — for pages that
 * have not yet been marked PG_dcache_clean — writes back the D-cache and
 * invalidates the I-cache so that freshly written data/code is coherent
 * before userspace can execute from the new mapping.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t *pte)
{
	unsigned long addr;
	struct page *page;

	flush_tlb_page(vma, address);

	/* PTEs that do not map real RAM need no cache maintenance. */
	if (!pfn_valid(pte_pfn(*pte)))
		return;

	page = pfn_to_page(pte_pfn(*pte));
	/* The shared zero page is never written through this path; skip it. */
	if (page == ZERO_PAGE(0))
		return;

	/*
	 * test_and_set_bit() makes the flush one-shot: only the first
	 * mapping of the page since its dcache-clean bit was cleared pays
	 * for the cache maintenance below.
	 */
	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

	/* Temporary kernel mapping; valid only until kunmap_atomic(). */
	addr = (unsigned long) kmap_atomic(page);

	/*
	 * NOTE(review): the I-cache is invalidated through the *user*
	 * virtual address while the D-cache is written back through the
	 * *kernel* mapping obtained above — presumably deliberate for this
	 * CPU's cache indexing scheme; confirm against the core's
	 * aliasing rules before changing either address.
	 */
	icache_inv_range(address, address + PAGE_SIZE);
	dcache_wb_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}

/*
 * Perform a deferred I-cache invalidation on the current CPU, if a remote
 * flush_icache_mm_range() marked this CPU's I-cache as stale for @mm.
 * Expected to run with this CPU unable to migrate (caller context);
 * smp_processor_id() below assumes that.
 */
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

/*
 * Make instruction fetches coherent with recent data writes in
 * [@start, @end) for @mm, across all CPUs.
 *
 * Fast path: with CPU_HAS_ICACHE_INS and our own mm, a ranged invalidate
 * on the local CPU suffices. Otherwise every CPU's I-cache is marked
 * stale in mm->context.icache_stale_mask (lazily flushed later via
 * flush_icache_deferred()), the local I-cache is invalidated now, and
 * CPUs currently running this mm are flushed synchronously via IPI.
 */
void flush_icache_mm_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	/* Pin to one CPU so cpu/mask bookkeeping below stays consistent. */
	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		/* Synchronous (wait=1) IPI flush of the running harts ... */
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		/* ... after which no deferred flush is pending anywhere. */
		cpumask_clear(mask);
	}

	preempt_enable();
}