/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KMSAN support.
 *
 * Copyright (C) 2022, Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#ifndef _ASM_X86_KMSAN_H
#define _ASM_X86_KMSAN_H

#ifndef MODULE

#include <asm/cpu_entry_area.h>
#include <asm/processor.h>
#include <linux/mmzone.h>

DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_shadow);
DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_origin);

/*
 * The functions below are declared in the header to make sure they are
 * inlined. They are all called from kmsan_get_metadata() for every memory
 * access in the kernel, so speed is critical here.
 */
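
/*
 * For context, a minimal sketch of how the generic KMSAN runtime is
 * expected to consult this arch hook (an illustrative assumption about
 * the caller, not a verbatim copy of mm/kmsan/shadow.c;
 * generic_metadata_lookup() is a hypothetical placeholder for the
 * page/vmalloc metadata fallback):
 *
 *	void *kmsan_get_metadata(void *addr, bool is_origin)
 *	{
 *		void *ret = arch_kmsan_get_meta_or_null(addr, is_origin);
 *
 *		if (ret)
 *			return ret;
 *		return generic_metadata_lookup(addr, is_origin);
 *	}
 */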

/*
 * Compute metadata addresses for the CPU entry area on x86.
 */
static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr;
	char *metadata_array;
	unsigned long off;
	int cpu;

	if ((addr64 < CPU_ENTRY_AREA_BASE) ||
	    (addr64 >= (CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE)))
		return NULL;
	cpu = (addr64 - CPU_ENTRY_AREA_BASE) / CPU_ENTRY_AREA_SIZE;
	off = addr64 - (unsigned long)get_cpu_entry_area(cpu);
	/* @off is unsigned, so an underflow above wraps past the bound. */
	if (off >= CPU_ENTRY_AREA_SIZE)
		return NULL;
	metadata_array = is_origin ? cpu_entry_area_origin :
				     cpu_entry_area_shadow;
	return &per_cpu(metadata_array[off], cpu);
}

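/*
 * Worked example (all numbers are illustrative assumptions): for an
 * access at CPU_ENTRY_AREA_BASE + 2 * CPU_ENTRY_AREA_SIZE + 0x100, the
 * division yields cpu == 2; if get_cpu_entry_area(2) happens to equal
 * CPU_ENTRY_AREA_BASE + 2 * CPU_ENTRY_AREA_SIZE (the real layout may
 * differ), then off == 0x100, and the shadow byte for the access is
 * &per_cpu(cpu_entry_area_shadow[0x100], 2).
 */
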
/*
 * Taken from arch/x86/mm/physaddr.h to avoid using an instrumented version.
 */
static inline bool kmsan_phys_addr_valid(unsigned long addr)
{
	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return !(addr >> boot_cpu_data.x86_phys_bits);
	else
		return true;
}

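/*
 * Example (hedged: x86_phys_bits is CPU-specific, 46 is only an assumed
 * value): with boot_cpu_data.x86_phys_bits == 46, any address below
 * 1UL << 46 is accepted, so 0x3fffffffffff passes while 0x400000000000
 * is rejected, because shifting the latter right by 46 leaves a nonzero
 * value.
 */
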
/*
 * Taken from arch/x86/mm/physaddr.c to avoid using an instrumented version.
 */
static inline bool kmsan_virt_addr_valid(void *addr)
{
	unsigned long x = (unsigned long)addr;
	unsigned long y = x - __START_KERNEL_map;
	bool ret;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		if (y >= KERNEL_IMAGE_SIZE)
			return false;
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		if ((x > y) || !kmsan_phys_addr_valid(x))
			return false;
	}

	/*
	 * pfn_valid() relies on RCU, and may call into the scheduler on
	 * exiting the critical section. However, this would result in
	 * recursion with KMSAN. Therefore, disable preemption here, and
	 * re-enable preemption below while suppressing reschedules to avoid
	 * recursion.
	 *
	 * Note that this occasionally sacrifices scheduling guarantees.
	 * However, a kernel compiled with KMSAN has already given up on any
	 * performance guarantees due to being heavily instrumented.
	 */
	preempt_disable();
	ret = pfn_valid(x >> PAGE_SHIFT);
	preempt_enable_no_resched();

	return ret;
}
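
/*
 * A minimal sketch of the equivalent, explicit range checks (for
 * illustration only; the function above deliberately relies on unsigned
 * wraparound instead, mirroring arch/x86/mm/physaddr.c). The first
 * branch handles the kernel image mapping, the second the direct map:
 *
 *	if (x >= __START_KERNEL_map) {
 *		if (x - __START_KERNEL_map >= KERNEL_IMAGE_SIZE)
 *			return false;
 *		phys = x - __START_KERNEL_map + phys_base;
 *	} else {
 *		if (x < PAGE_OFFSET ||
 *		    !kmsan_phys_addr_valid(x - PAGE_OFFSET))
 *			return false;
 *		phys = x - PAGE_OFFSET;
 *	}
 */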

#endif /* !MODULE */

#endif /* _ASM_X86_KMSAN_H */