xref: /openbmc/linux/arch/x86/include/asm/kmsan.h (revision 4ca8cc8d)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KMSAN support.
 *
 * Copyright (C) 2022, Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#ifndef _ASM_X86_KMSAN_H
#define _ASM_X86_KMSAN_H

#ifndef MODULE

#include <asm/processor.h>
#include <linux/mmzone.h>

/*
 * Taken from arch/x86/mm/physaddr.h to avoid using an instrumented version.
 */
static inline bool kmsan_phys_addr_valid(unsigned long addr)
{
	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return !(addr >> boot_cpu_data.x86_phys_bits);
	else
		return true;
}
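
/*
 * Worked example (illustrative values only; the actual width comes from
 * CPUID): on a CPU reporting boot_cpu_data.x86_phys_bits == 46, any
 * address below 1UL << 46 shifts down to zero and is accepted, anything
 * larger is rejected:
 *
 *	kmsan_phys_addr_valid(0x00003fffffffffffUL);	// true: fits in 46 bits
 *	kmsan_phys_addr_valid(0x0000400000000000UL);	// false: bit 46 is set
 *
 * Without CONFIG_PHYS_ADDR_T_64BIT every physical address is considered
 * valid.
 */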

/*
 * Taken from arch/x86/mm/physaddr.c to avoid using an instrumented version.
 */
static inline bool kmsan_virt_addr_valid(void *addr)
{
	unsigned long x = (unsigned long)addr;
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		if (y >= KERNEL_IMAGE_SIZE)
			return false;
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		if ((x > y) || !kmsan_phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
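
/*
 * Worked example of the wrap-around idiom above (illustrative values; the
 * actual constants depend on the configuration): with __START_KERNEL_map
 * at 0xffffffff80000000,
 *
 *	x = 0xffffffff81000000	-> y = 0x0000000001000000
 *
 * the subtraction does not wrap, so x > y and the kernel-image branch runs:
 * y is checked against KERNEL_IMAGE_SIZE and the physical address is
 * y + phys_base.  For a direct-map pointer such as
 *
 *	x = 0xffff888000100000	(x < __START_KERNEL_map)
 *
 * the subtraction wraps around, y > x, and the else branch rewrites x to
 * the equivalent of x - PAGE_OFFSET before handing it to
 * kmsan_phys_addr_valid().  The unsigned comparisons stand in for the
 * carry flag the comments refer to.
 */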

#endif /* !MODULE */

#endif /* _ASM_X86_KMSAN_H */