/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

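/*
 * The helpers below are only needed by the KFENCE core, which cannot be
 * built as a module (CONFIG_KFENCE is a bool), so they are hidden from
 * module builds.
 */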
#ifndef MODULE

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/* Force 4K pages for __kfence_pool. */
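/*
 * kfence_protect_page() below toggles the present bit of individual PTEs,
 * which is only possible if every pool page is mapped by its own 4K PTE;
 * set_memory_4k() splits any huge mapping still covering a pool page.
 */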
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}

/* Protect the given page and flush TLB. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

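	/*
	 * Clearing _PAGE_PRESENT protects the page: any access then faults,
	 * and the fault is routed to kfence_handle_page_fault(), which
	 * reports the out-of-bounds or use-after-free access. Setting the
	 * bit again unprotects the page for the next allocation.
	 */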
	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	else
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
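	/*
	 * flush_tlb_one_kernel() only flushes the current CPU's TLB, and
	 * with KPTI it also touches per-CPU state, so it must not be run
	 * preemptibly.
	 */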
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();
	return true;
}

#endif /* !MODULE */

#endif /* _ASM_X86_KFENCE_H */