xref: /openbmc/linux/arch/s390/include/asm/kfence.h (revision e41ba1115a351dd037c21ac75660638219d51485)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_S390_KFENCE_H
#define _ASM_S390_KFENCE_H

#include <linux/mm.h>
#include <linux/kfence.h>
#include <asm/set_memory.h>
#include <asm/page.h>

/* Validate (enable != 0) or invalidate the kernel mapping of @numpages pages. */
void __kernel_map_pages(struct page *page, int numpages, int enable);

/*
 * Nothing to do here: the kfence pool mapping is already split to 4k
 * page granularity via kfence_split_mapping() during early boot.
 */
static __always_inline bool arch_kfence_init_pool(void)
{
	return true;
}

/*
 * The s390 fault handler reports the fault address only with page
 * granularity, so mask off the in-page offset before testing an address.
 */
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)

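/*
 * Illustrative example (not part of the original header): with 4k pages
 * PAGE_MASK is ~0xfffUL, so
 *
 *	arch_kfence_test_address(0x3ffe0002abcUL) == 0x3ffe0002000UL
 *
 * i.e. only the page frame of the faulting address is significant on s390.
 */
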
/*
 * Do not split the kfence pool into 4k mappings in arch_kfence_init_pool(),
 * but do it earlier, while page table allocations are still served by
 * memblock. The reason is that arch_kfence_init_pool() is called when the
 * system is still in a limbo state - disabling and enabling bottom halves
 * is not yet allowed, but that is exactly what our page_table_alloc()
 * would do.
 */
static __always_inline void kfence_split_mapping(void)
{
#ifdef CONFIG_KFENCE
	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
#endif
}

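/*
 * Usage sketch (not part of the original header): kfence_split_mapping()
 * is meant to be called once during early memory setup, while memblock
 * still backs page table allocations. The caller below is hypothetical
 * and only illustrates the intended ordering; setup_memory_early() is a
 * made-up name.
 *
 *	void __init setup_memory_early(void)
 *	{
 *		// page tables are still allocated via memblock here, so
 *		// splitting the kfence pool to 4k mappings is safe
 *		kfence_split_mapping();
 *
 *		// ...later, once the buddy allocator is up, the generic
 *		// KFENCE code calls arch_kfence_init_pool(), which is a
 *		// no-op on s390 because the split already happened
 *	}
 */
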
/* Protect (invalidate) or unprotect a single kfence pool page. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	__kernel_map_pages(virt_to_page(addr), 1, !protect);
	return true;
}

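/*
 * Usage sketch (not part of the original header): the generic KFENCE code
 * toggles guard pages through this hook; the real callers live in
 * mm/kfence/core.c. Roughly simplified, protecting and unprotecting a pool
 * address boils down to the (hypothetical) helpers below.
 *
 *	static bool guard_protect(unsigned long addr)
 *	{
 *		// invalidate the page: any access now faults and gets
 *		// reported by KFENCE as an out-of-bounds/use-after-free
 *		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
 *	}
 *
 *	static bool guard_unprotect(unsigned long addr)
 *	{
 *		// make the page accessible again for a new allocation
 *		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
 *	}
 */
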
#endif /* _ASM_S390_KFENCE_H */