/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

/*
 * No MMU means there are no TLBs to flush; evaluate and discard the
 * argument so the generic mmu_gather code still compiles.  NOTE: this
 * is deliberately defined *before* <asm-generic/tlb.h> is included,
 * which presumably picks it up -- keep that ordering.
 */
#define tlb_flush(tlb) ((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Free a page-table page handed to us as an opaque table pointer by
 * the generic mmu_gather code, together with any swap-cache reference
 * it may still hold.  Defined before <asm-generic/tlb.h> so the
 * generic code can use it.
 */
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

#include <asm-generic/tlb.h>

/*
 * Release a pte page via the mmu_gather: run the pte page destructor,
 * make sure the gather's flush range covers the mappings, then hand
 * the page to the generic tlb_remove_table().
 */
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
	pgtable_pte_page_dtor(pte);

#ifndef CONFIG_ARM_LPAE
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.  Widen the gather's range so both
	 * 1MB sections get flushed, not just the one containing 'addr'.
	 */
	addr = (addr & PMD_MASK) + SZ_1M;
	__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif

	tlb_remove_table(tlb, pte);
}

/*
 * Release a pmd page via the mmu_gather.  Only LPAE does any work
 * here; with the classic MMU this is a no-op (no separately allocated
 * pmd page to free, presumably -- the pmd level is folded).
 */
static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	struct page *page = virt_to_page(pmdp);

	tlb_remove_table(tlb, page);
#endif
}

#endif /* CONFIG_MMU */
#endif