// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

/*
 * MMU/cache coherency hook, called after a PTE has been installed for
 * @address in @vma: bring the underlying page's data cache to memory and,
 * for executable mappings, invalidate the instruction cache so the CPU
 * cannot fetch stale instructions.
 *
 * Note: @address is not used here; the whole page backing *@pte is
 * maintained instead.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t *pte)
{
	unsigned long addr;
	struct page *page;

	page = pfn_to_page(pte_pfn(*pte));
	/* The shared zero page is never written through the dcache. */
	if (page == ZERO_PAGE(0))
		return;

	/*
	 * PG_dcache_clean records that this page has already been cleaned.
	 * test_and_set_bit() makes the clean run at most once per page
	 * until the flag is cleared elsewhere.
	 */
	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

	/* Temporary kernel mapping: the page may be in highmem. */
	addr = (unsigned long) kmap_atomic(page);

	/* Write back the page's dcache lines so memory holds current data. */
	dcache_wb_range(addr, addr + PAGE_SIZE);

	/* Executable mapping: drop any stale icache lines for the page. */
	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}

/*
 * Perform a deferred icache invalidation for @mm on the current CPU.
 *
 * flush_icache_mm_range() marks remote harts' icaches as stale in
 * mm->context.icache_stale_mask instead of IPI-ing all of them; each hart
 * then invalidates its own icache here (presumably on the next switch to
 * @mm — the call site is outside this file, so confirm against the caller).
 * Must run with the CPU pinned (caller context); smp_processor_id() is
 * read without protection here.
 */
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

/*
 * Make instructions in [@start, @end) of @mm coherent across all harts.
 *
 * Harts currently running @mm are flushed immediately (by IPI); all other
 * harts are only marked stale in mm->context.icache_stale_mask and flush
 * lazily via flush_icache_deferred(). Preemption is disabled for the whole
 * sequence so the executing hart cannot migrate between reading
 * smp_processor_id() and acting on it.
 */
void flush_icache_mm_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
	/*
	 * Hardware supports ranged icache invalidation: for the current
	 * mm a local ranged flush is sufficient, no cross-hart work.
	 */
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}