/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/kmap_types.h>

#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define flush_cache_kmaps() \
	do { \
		if (cache_is_vivt()) \
			flush_cache_all(); \
	} while (0)

extern pte_t *pkmap_page_table;

/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context.  With a VIVT cache this
 * is essential to do, but with a VIPT cache this is only an optimization
 * so as not to pay the price of establishing a second mapping if an
 * existing one can be used.  However, on platforms without hardware TLB
 * maintenance broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET
 * at all since the locking involved must also disable IRQs which is
 * incompatible with the IPI mechanism used by global TLB operations.
 */
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif

/*
 * Needed to be able to broadcast the TLB invalidation for kmap.
 */
#ifdef CONFIG_ARM_ERRATA_798181
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
static inline void *kmap_high_get(struct page *page)
{
	return NULL;
}
#endif

/*
 * The following functions are already defined by <linux/highmem.h>
 * when CONFIG_HIGHMEM is not set.
 */
#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_pfn(unsigned long pfn);
#endif

#endif
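
/*
 * Illustrative sketch (not part of the header above, kept out of the
 * build with "#if 0"): a minimal example of the caller pattern the
 * kmap_high_get() comment describes.  The caller first tries to reuse
 * the page's existing kmap, which pins its pkmap reference count, and
 * only falls back to a temporary kmap_atomic() mapping when no kmap
 * exists.  The helper name example_clear_highpage() is hypothetical;
 * kmap_high_get(), kunmap_high(), kmap_atomic() and kunmap_atomic()
 * are the kernel highmem calls assumed to be provided by
 * <linux/highmem.h>.
 */
#if 0	/* example only, never compiled */
#include <linux/highmem.h>
#include <linux/string.h>

static void example_clear_highpage(struct page *page)
{
	/* Reuse an existing kmap of this page if one is present... */
	void *vaddr = kmap_high_get(page);

	if (vaddr) {
		memset(vaddr, 0, PAGE_SIZE);
		kunmap_high(page);		/* drop the extra pkmap reference */
	} else {
		/* ...otherwise create a temporary per-CPU mapping. */
		vaddr = kmap_atomic(page);
		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr);
	}
}
#endif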