/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_S390_KFENCE_H
#define _ASM_S390_KFENCE_H

#include <linux/mm.h>
#include <linux/kfence.h>
#include <asm/set_memory.h>
#include <asm/page.h>

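/*
 * Forward declaration so kfence_protect_page() below can use
 * __kernel_map_pages() to validate/invalidate the kernel mapping of a page.
 */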
void __kernel_map_pages(struct page *page, int numpages, int enable);

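/*
 * Nothing to do here: the pool mapping has already been split to 4k pages
 * by kfence_split_mapping() below, so simply report success.
 */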
static __always_inline bool arch_kfence_init_pool(void)
{
	return true;
}

#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)

/*
 * Do not split the kfence pool into a 4k mapping with arch_kfence_init_pool(),
 * but earlier, while page table allocations still happen with memblock.
 * The reason is that arch_kfence_init_pool() gets called when the system
 * is still in a limbo state - disabling and enabling bottom halves is
 * not yet allowed, but that is what our page_table_alloc() would do.
 */
static __always_inline void kfence_split_mapping(void)
{
#ifdef CONFIG_KFENCE
	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
#endif
}

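/*
 * Protect or unprotect a KFENCE page by invalidating or re-validating its
 * kernel mapping via __kernel_map_pages(), so that any access to a protected
 * page faults and can be reported by KFENCE.
 */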
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
	return true;
}

#endif /* _ASM_S390_KFENCE_H */