xref: /openbmc/linux/arch/s390/include/asm/kfence.h (revision 2d1494fb)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #ifndef _ASM_S390_KFENCE_H
4 #define _ASM_S390_KFENCE_H
5 
6 #include <linux/mm.h>
7 #include <linux/kfence.h>
8 #include <asm/set_memory.h>
9 #include <asm/page.h>
10 
11 void __kernel_map_pages(struct page *page, int numpages, int enable);
12 
arch_kfence_init_pool(void)13 static __always_inline bool arch_kfence_init_pool(void)
14 {
15 	return true;
16 }
17 
/* Mask off the sub-page bits, i.e. round addr down to its page base. */
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
19 
20 /*
21  * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
22  * but earlier where page table allocations still happen with memblock.
23  * Reason is that arch_kfence_init_pool() gets called when the system
24  * is still in a limbo state - disabling and enabling bottom halves is
25  * not yet allowed, but that is what our page_table_alloc() would do.
26  */
kfence_split_mapping(void)27 static __always_inline void kfence_split_mapping(void)
28 {
29 #ifdef CONFIG_KFENCE
30 	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
31 
32 	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
33 #endif
34 }
35 
/*
 * (Un)protect a single KFENCE page by toggling its kernel mapping:
 * protect == true unmaps the page so any access faults, protect ==
 * false maps it back in.  Always succeeds on s390.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	int enable = !protect;

	__kernel_map_pages(virt_to_page((void *)addr), 1, enable);
	return true;
}
41 
42 #endif /* _ASM_S390_KFENCE_H */
43