/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
/*
 * We use one full pte table with 4K pages. With 16K/64K/256K pages the
 * pte table covers enough memory (32MB/512MB/2GB respectively) that both
 * FIXMAP and PKMAP can be placed in a single pte table. We use 512 pages
 * for PKMAP with 16K/64K/256K page sizes.
 */
#ifdef CONFIG_PPC_4K_PAGES
#define PKMAP_ORDER	PTE_SHIFT
#else
#define PKMAP_ORDER	9
#endif
#define LAST_PKMAP	(1 << PKMAP_ORDER)
#ifndef CONFIG_PPC_4K_PAGES
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
#else
#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
#endif
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
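/*
 * Illustrative sketch (not part of the API above): PKMAP_NR() and
 * PKMAP_ADDR() convert between a pkmap virtual address and its slot
 * index. For example, the address PKMAP_BASE + 3*PAGE_SIZE is slot 3,
 * and PKMAP_ADDR(3) gives back the same address. Each slot is backed by
 * one entry of pkmap_page_table.
 */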

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
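/*
 * Usage sketch (illustrative only; 'buf' and 'len' are caller-provided):
 *
 *	char *vaddr = kmap(page);
 *	memcpy(vaddr, buf, len);
 *	kunmap(page);
 *
 * kmap() may sleep, so it must only be used in process context; atomic
 * contexts should use kmap_atomic() below instead.
 */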

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * give a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
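/*
 * Slot selection sketch: each CPU owns a private window of KM_TYPE_NR
 * fixmap slots, so the pair (type, cpu) picks a unique virtual address.
 * For example (illustrative), KM_USER0 on CPU 1 uses slot
 * KM_USER0 + KM_TYPE_NR * 1, i.e. the address
 * __fix_to_virt(FIX_KMAP_BEGIN + KM_USER0 + KM_TYPE_NR).
 */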
static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}

static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_page(NULL, vaddr);
#endif
	pagefault_enable();
}
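/*
 * Atomic usage sketch (illustrative; the km_type slot must match between
 * map and unmap, and the code in between must not sleep):
 *
 *	char *vaddr = kmap_atomic(page, KM_USER0);
 *	clear_page(vaddr);
 *	kunmap_atomic(vaddr, KM_USER0);
 */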

static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long) ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

#define flush_cache_kmaps()	flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */