/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);
	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
		(addr >= __fix_to_virt(FIX_KMAP_END) &&
		 addr < __fix_to_virt(FIX_KMAP_BEGIN));
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr:       Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)

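/*
 * A minimal usage sketch for the (deprecated) kmap_atomic()/kunmap_atomic()
 * pair; the helper name below is illustrative only:
 *
 *	static void zero_page_example(struct page *page)
 *	{
 *		void *vaddr = kmap_atomic(page);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap_atomic(vaddr);
 *	}
 *
 * Pagefaults (and, on PREEMPT_RT, migration) stay disabled between the two
 * calls, so new code should prefer kmap_local_page()/kunmap_local().
 */
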
/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the mapped page
 *
 * @__addr can be any address within the mapped page.  Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping.  See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)

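/*
 * A minimal usage sketch for kmap_local_page()/kunmap_local(); the helper
 * name and the destination buffer are illustrative only:
 *
 *	static void copy_from_page_example(struct page *page, void *buf,
 *					   size_t len)
 *	{
 *		void *vaddr = kmap_local_page(page);
 *
 *		memcpy(buf, vaddr, len);
 *		kunmap_local(vaddr);
 *	}
 *
 * The mapping is local to the calling context, and nested mappings must be
 * unmapped in reverse order, as described above.
 */
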
#endif