/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/cachetype.h>
#include <asm/fixmap.h>

/*
 * The persistent-kmap (PKMAP) window occupies the last PMD-sized slot
 * below PAGE_OFFSET and is backed by a single PTE page, giving
 * PTRS_PER_PTE mapping slots.
 */
#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
/* Convert a PKMAP virtual address to its slot index, and back. */
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

/*
 * Only a VIVT cache can hold stale aliases of kmap'd pages, so the
 * full flush is needed (and performed) only in that case; VIPT/PIPT
 * configurations take no action here.
 */
#define flush_cache_kmaps() \
	do { \
		if (cache_is_vivt()) \
			flush_cache_all(); \
	} while (0)

/* The PTE page backing the PKMAP window (set up elsewhere at boot). */
extern pte_t *pkmap_page_table;

/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context. With a VIVT cache this
 * is essential to do, but with a VIPT cache this is only an optimization
 * so not to pay the price of establishing a second mapping if an existing
 * one can be used. However, on platforms without hardware TLB maintenance
 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
 * the locking involved must also disable IRQs which is incompatible with
 * the IPI mechanism used by global TLB operations.
 */
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
/*
 * HIGHMEM + VIVT requires kmap_high_get() (see comment above), but
 * SMP + V6 TLB just forbade it — no valid configuration exists.
 */
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif

/*
 * Needed to be able to broadcast the TLB invalidation for kmap.
 */
#ifdef CONFIG_ARM_ERRATA_798181
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/* Returns the existing kmap virtual address of @page, or NULL. */
extern void *kmap_high_get(struct page *page);

static inline void *arch_kmap_local_high_get(struct page *page)
{
	/*
	 * With DEBUG_HIGHMEM, skip the existing-mapping shortcut unless a
	 * VIVT cache makes reuse mandatory — presumably so the local
	 * mapping path gets exercised under debug; confirm against the
	 * generic kmap_local implementation.
	 */
	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
		return NULL;
	return kmap_high_get(page);
}
#define arch_kmap_local_high_get arch_kmap_local_high_get

#else /* ARCH_NEEDS_KMAP_HIGH_GET */
/* No high-get support: callers always fall back to a fresh mapping. */
static inline void *kmap_high_get(struct page *page)
{
	return NULL;
}
#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */

/* A new PTE was installed at @vaddr: drop any stale TLB entry locally. */
#define arch_kmap_local_post_map(vaddr, pteval)	\
	local_flush_tlb_kernel_page(vaddr)

/*
 * About to tear down the mapping at @vaddr: on a VIVT cache, flush the
 * data cache for the page first, since its lines are indexed by the
 * virtual address that is going away.
 */
#define arch_kmap_local_pre_unmap(vaddr)				\
	do {								\
		if (cache_is_vivt())					\
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
	} while (0)

/* Mapping removed: invalidate the stale TLB entry on this CPU. */
#define arch_kmap_local_post_unmap(vaddr)	\
	local_flush_tlb_kernel_page(vaddr)

#endif