xref: /openbmc/linux/arch/arm/include/asm/highmem.h (revision 8fdff1dc)
1 #ifndef _ASM_HIGHMEM_H
2 #define _ASM_HIGHMEM_H
3 
4 #include <asm/kmap_types.h>
5 
/*
 * Persistent kmap (pkmap) area layout: the last PMD-sized slot just
 * below the direct-mapped kernel region.  A single PTE page covers it,
 * hence LAST_PKMAP == PTRS_PER_PTE entries are available.
 */
6 #define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
7 #define LAST_PKMAP		PTRS_PER_PTE
8 #define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
/* Convert between a pkmap virtual address and its slot index. */
9 #define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
10 #define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
11 
/* Page protection used for pkmap entries: ordinary kernel memory. */
12 #define kmap_prot		PAGE_KERNEL
13 
/*
 * Flush caches before recycling kmap slots.  Only a VIVT cache can hold
 * stale lines indexed/tagged by the old virtual address; VIPT caches
 * need no action here.
 */
14 #define flush_cache_kmaps() \
15 	do { \
16 		if (cache_is_vivt()) \
17 			flush_cache_all(); \
18 	} while (0)
19 
/* PTE page backing the pkmap area (populated during MM init). */
20 extern pte_t *pkmap_page_table;
21 
/* Core highmem entry points implemented in mm/highmem.c. */
22 extern void *kmap_high(struct page *page);
23 extern void kunmap_high(struct page *page);
24 
25 /*
26  * The reason for kmap_high_get() is to ensure that the currently kmap'd
27  * page usage count does not decrease to zero while we're using its
28  * existing virtual mapping in an atomic context.  With a VIVT cache this
29  * is essential to do, but with a VIPT cache this is only an optimization
30  * so not to pay the price of establishing a second mapping if an existing
31  * one can be used.  However, on platforms without hardware TLB maintenance
32  * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
33  * the locking involved must also disable IRQs which is incompatible with
34  * the IPI mechanism used by global TLB operations.
35  */
36 #define ARCH_NEEDS_KMAP_HIGH_GET
37 #if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
38 #undef ARCH_NEEDS_KMAP_HIGH_GET
/*
 * SMP + ARMv6 software-broadcast TLB maintenance cannot support
 * kmap_high_get() (see comment above); combined with a VIVT cache and
 * highmem there is no correct configuration at all, so fail the build.
 */
39 #if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
40 #error "The sum of features in your kernel config cannot be supported together"
41 #endif
42 #endif
43 
44 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
45 extern void *kmap_high_get(struct page *page);
46 #else
/*
 * Stub for configurations without kmap_high_get() support: returning
 * NULL tells callers no existing kmap can be reused, so they must
 * establish a fresh atomic mapping instead.
 */
47 static inline void *kmap_high_get(struct page *page)
48 {
49 	return NULL;
50 }
51 #endif
52 
53 /*
54  * The following functions are already defined by <linux/highmem.h>
55  * when CONFIG_HIGHMEM is not set.
56  */
57 #ifdef CONFIG_HIGHMEM
58 extern void *kmap(struct page *page);
59 extern void kunmap(struct page *page);
60 extern void *kmap_atomic(struct page *page);
61 extern void __kunmap_atomic(void *kvaddr);
62 extern void *kmap_atomic_pfn(unsigned long pfn);
63 extern struct page *kmap_atomic_to_page(const void *ptr);
64 #endif
65 
66 #endif
67
67