/*
 * include/asm-xtensa/highmem.h
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 * Copyright (C) 2014 Cadence Design Systems Inc.
 */

#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H

#include <linux/wait.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

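/*
 * Persistent kmap (PKMAP) window.  PKMAP_BASE places the window below
 * FIXADDR_START and aligns it down to a PMD boundary; LAST_PKMAP is a
 * multiple of the number of D-cache colors so that every color gets the
 * same number of slots.  PKMAP_NR()/PKMAP_ADDR() convert between a
 * virtual address inside the window and its slot number.
 */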
#define PKMAP_BASE		((FIXADDR_START - \
				  (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
#define LAST_PKMAP		(PTRS_PER_PTE * DCACHE_N_COLORS)
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define kmap_prot		PAGE_KERNEL_EXEC

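/*
 * With an aliasing D-cache (way size larger than a page) the generic
 * pkmap bookkeeping is overridden below so that every highmem page is
 * mapped at a virtual address of the same cache color as its physical
 * address, keeping the kernel mapping coherent with other mappings of
 * the same page.
 */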
#if DCACHE_WAY_SIZE > PAGE_SIZE
#define get_pkmap_color get_pkmap_color
static inline int get_pkmap_color(struct page *page)
{
	return DCACHE_ALIAS(page_to_phys(page));
}

extern unsigned int last_pkmap_nr_arr[];

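/*
 * One rotating slot index per color: each call advances that color's
 * index by DCACHE_N_COLORS, so the slot number returned always maps to
 * a virtual address of the requested color.
 */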
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	last_pkmap_nr_arr[color] =
		(last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
	return last_pkmap_nr_arr[color] + color;
}

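/*
 * A slot number below DCACHE_N_COLORS means the per-color index has just
 * wrapped around, i.e. every slot of this color has been visited once.
 */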
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr < DCACHE_N_COLORS;
}

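/* Each color owns LAST_PKMAP / DCACHE_N_COLORS (i.e. PTRS_PER_PTE) slots. */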
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP / DCACHE_N_COLORS;
}

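/*
 * One wait queue per color, so that a task waiting for a free slot of
 * one color is not woken when a slot of a different color is released.
 */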
extern wait_queue_head_t pkmap_map_wait_arr[];

static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	return pkmap_map_wait_arr + color;
}
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

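/*
 * PTEs backing the PKMAP window, walked by the generic kmap_high()/
 * kunmap_high() code.
 */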
extern pte_t *pkmap_page_table;

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);

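/*
 * A minimal usage sketch (pg, buf and len are placeholders obtained
 * elsewhere); kmap() may sleep, so it must only be called from process
 * context:
 *
 *	void *vaddr = kmap(pg);
 *	memcpy(vaddr, buf, len);
 *	kunmap(pg);
 */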
static inline void *kmap(struct page *page)
{
	/* Check if this memory layout is broken because PKMAP overlaps
	 * the page table.
	 */
	BUILD_BUG_ON(PKMAP_BASE <
		     TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

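/*
 * Called by the generic pkmap code before it recycles unused entries;
 * flushing the entire cache is the simple, conservative choice on an
 * aliasing D-cache.
 */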
static inline void flush_cache_kmaps(void)
{
	flush_cache_all();
}

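/*
 * Atomic (non-sleeping) kmaps.  Callers normally pair kmap_atomic() with
 * the generic kunmap_atomic() wrapper from <linux/highmem.h>, which ends
 * up calling __kunmap_atomic().  A minimal sketch, with pg a placeholder
 * struct page pointer obtained elsewhere:
 *
 *	void *p = kmap_atomic(pg);
 *	...access the page through p...
 *	kunmap_atomic(p);
 */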
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);

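/*
 * One-time boot setup for the highmem mapping machinery; see
 * arch/xtensa/mm/highmem.c.
 */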
void kmap_init(void);

#endif /* _XTENSA_HIGHMEM_H */