xref: /openbmc/linux/arch/arm/include/asm/kasan_def.h (revision c12366ba)
1*c12366baSLinus Walleij /* SPDX-License-Identifier: GPL-2.0 */
2*c12366baSLinus Walleij /*
3*c12366baSLinus Walleij  *  arch/arm/include/asm/kasan_def.h
4*c12366baSLinus Walleij  *
5*c12366baSLinus Walleij  *  Copyright (c) 2018 Huawei Technologies Co., Ltd.
6*c12366baSLinus Walleij  *
7*c12366baSLinus Walleij  *  Author: Abbott Liu <liuwenliang@huawei.com>
8*c12366baSLinus Walleij  */
9*c12366baSLinus Walleij 
#ifndef __ASM_KASAN_DEF_H
#define __ASM_KASAN_DEF_H

#ifdef CONFIG_KASAN

/*
 * Define KASAN_SHADOW_OFFSET, KASAN_SHADOW_START and KASAN_SHADOW_END for
 * the Arm kernel address sanitizer. We are "stealing" lowmem (the 4GB
 * addressable by a 32bit architecture) out of the virtual address
 * space to use as shadow memory for KASan as follows:
 *
 * +----+ 0xffffffff
 * |    |							\
 * |    | |-> Static kernel image (vmlinux) BSS and page table
 * |    |/
 * +----+ PAGE_OFFSET
 * |    |							\
 * |    | |->  Loadable kernel modules virtual address space area
 * |    |/
 * +----+ MODULES_VADDR = KASAN_SHADOW_END
 * |    |						\
 * |    | |-> The shadow area of kernel virtual address.
 * |    |/
 * +----+->  TASK_SIZE (start of kernel space) = KASAN_SHADOW_START the
 * |    |\   shadow address of MODULES_VADDR
 * |    | |
 * |    | |
 * |    | |-> The user space area in lowmem. The kernel address
 * |    | |   sanitizer does not use this space, nor does it map it.
 * |    | |
 * |    | |
 * |    | |
 * |    | |
 * |    |/
 * ------ 0
 *
 * 1) KASAN_SHADOW_START
 *   This value begins with the MODULES_VADDR's shadow address. It is the
 *   start of kernel virtual space. Since we have modules to load, we need
 *   to cover also that area with shadow memory so we can find memory
 *   bugs in modules.
 *
 * 2) KASAN_SHADOW_END
 *   This value is the 0x100000000's shadow address: the mapping that would
 *   be after the end of the kernel memory at 0xffffffff. It is the end of
 *   kernel address sanitizer shadow area. It is also the start of the
 *   module area.
 *
 * 3) KASAN_SHADOW_OFFSET:
 *   This value is used to map an address to the corresponding shadow
 *   address by the following formula:
 *
 *	shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
 *
 *  As you would expect, >> 3 is equal to dividing by 8, meaning each
 *  byte in the shadow memory covers 8 bytes of kernel memory, so one
 *  bit of shadow memory per byte of kernel memory is used.
 *
 *  The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending
 *  on the VMSPLIT layout of the system: the kernel and userspace can
 *  split up lowmem in different ways according to needs, so we calculate
 *  the shadow offset depending on this.
 */

/* Each shadow byte covers 2^3 = 8 bytes of kernel address space. */
#define KASAN_SHADOW_SCALE_SHIFT	3
/*
 * The Kconfig-supplied offset used in the address -> shadow mapping
 * formula above; _AC() makes it usable from both C and assembly.
 */
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
/*
 * Shadow address of 0x100000000 (one past the top of the 32-bit address
 * space): (1 << (32 - 3)) + offset, per the mapping formula.
 */
#define KASAN_SHADOW_END	((UL(1) << (32 - KASAN_SHADOW_SCALE_SHIFT)) \
				 + KASAN_SHADOW_OFFSET)
/*
 * Shadow address of MODULES_VADDR: since MODULES_VADDR == KASAN_SHADOW_END,
 * this is simply the mapping formula applied to KASAN_SHADOW_END itself.
 */
#define KASAN_SHADOW_START      ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET)

#endif /* CONFIG_KASAN */
#endif /* __ASM_KASAN_DEF_H */
82