// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

/*
 * Flush the caches for one full page.
 *
 * The page is temporarily mapped with kmap_atomic() so this also works
 * for highmem pages that have no permanent kernel mapping.
 * cache_wbinv_range() comes from <asm/cache.h>; its name suggests a
 * combined write-back + invalidate over the given virtual range.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start;

	/* Temporary kernel mapping; must stay mapped across the flush. */
	start = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(start, start + PAGE_SIZE);

	kunmap_atomic((void *)start);
}

/*
 * Flush the caches for the sub-page range [vaddr, vaddr + len) of @page,
 * e.g. after new instructions were copied into a user page.
 *
 * Only the offset of @vaddr within the page (vaddr & ~PAGE_MASK) is
 * used; it is applied to the temporary kernel mapping of @page.
 *
 * NOTE(review): kunmap_atomic() is handed @kaddr *including* the
 * sub-page offset — this relies on kunmap_atomic() masking the address
 * down to the page boundary; confirm against the csky highmem code.
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, int len)
{
	unsigned long kaddr;

	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);

	cache_wbinv_range(kaddr, kaddr + len);

	kunmap_atomic((void *)kaddr);
}

/*
 * Keep caches coherent after a PTE for @address in @vma has been
 * installed or updated.
 *
 * Pages mapped without VM_EXEC, invalid pfns, and the shared zero page
 * are skipped.  Statement order matters: a highmem page without a
 * permanent mapping (page_address() == NULL) is mapped with
 * kmap_atomic() only for the duration of the flush.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr, pfn;
	struct page *page;
	void *va;

	/* Only executable mappings need this maintenance. */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	/* The shared zero page never carries stale data. */
	if (page == ZERO_PAGE(0))
		return;

	va = page_address(page);
	addr = (unsigned long) va;

	/*
	 * Highmem pages may have no permanent kernel mapping
	 * (page_address() returns NULL); create a temporary one so the
	 * flush has a valid virtual address to operate on.
	 */
	if (va == NULL && PageHighMem(page))
		addr = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(addr, addr + PAGE_SIZE);

	/* Drop the temporary mapping iff we created one above. */
	if (va == NULL && PageHighMem(page))
		kunmap_atomic((void *) addr);
}