// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core software tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static DEFINE_PER_CPU(u32, prng_state);

void __init kasan_init_sw_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = (u32)get_cycles();

	pr_info("KernelAddressSanitizer initialized\n");
}

/*
 * If a preemption happens between this_cpu_read and this_cpu_write, the only
 * side effect is that a few objects allocated in different contexts may get
 * the same tag. Since tag-based KASAN is meant to be used as a probabilistic
 * bug-detection debug feature, this doesn't have a significant negative
 * impact.
 *
 * Ideally the tags would use strong randomness to prevent any attempts to
 * predict them during explicit exploit attempts. But strong randomness is
 * expensive, and we made an intentional trade-off to use a PRNG. This
 * non-atomic RMW sequence in fact has a positive effect, since interrupts
 * that randomly skew the PRNG at unpredictable points only improve its
 * unpredictability.
 */
u8 kasan_random_tag(void)
{
	u32 state = this_cpu_read(prng_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(prng_state, state);

	return (u8)(state % (KASAN_TAG_MAX + 1));
}

bool kasan_check_range(unsigned long addr, size_t size, bool write,
			unsigned long ret_ip)
{
	u8 tag;
	u8 *shadow_first, *shadow_last, *shadow;
	void *untagged_addr;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	tag = get_tag((const void *)addr);

	/*
	 * Ignore accesses for pointers tagged with 0xff (native kernel
	 * pointer tag) to suppress false positives caused by kmap.
	 *
	 * Some kernel code was written to account for archs that don't keep
	 * high memory mapped all the time, but rather map and unmap particular
	 * pages when needed. Instead of storing a pointer to kernel memory,
	 * this code saves the address of the page structure and offset within
	 * that page for later use. Those pages are then mapped and unmapped
	 * with kmap/kunmap when necessary and virt_to_page is used to get the
	 * virtual address of the page. For arm64 (which keeps the high memory
	 * mapped all the time), kmap is turned into a page_address call.
	 *
	 * The issue is that with use of the page_address + virt_to_page
	 * sequence the top byte value of the original pointer gets lost (gets
	 * set to KASAN_TAG_KERNEL (0xFF)).
	 */
	if (tag == KASAN_TAG_KERNEL)
		return true;

	untagged_addr = kasan_reset_tag((const void *)addr);
	if (unlikely(untagged_addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START)))
		return !kasan_report(addr, size, write, ret_ip);

	shadow_first = kasan_mem_to_shadow(untagged_addr);
	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
		if (*shadow != tag)
			return !kasan_report(addr, size, write, ret_ip);
	}

	return true;
}

bool kasan_byte_accessible(const void *addr)
{
	u8 tag = get_tag(addr);
	void *untagged_addr = kasan_reset_tag(addr);
	u8 shadow_byte;

	if (untagged_addr < kasan_shadow_to_mem((void *)KASAN_SHADOW_START))
		return false;

	shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(untagged_addr));
	return tag == KASAN_TAG_KERNEL || tag == shadow_byte;
}

/*
 * Entry points called by compiler-generated (outline) instrumentation for
 * fixed-size loads and stores.
 */
#define DEFINE_HWASAN_LOAD_STORE(size)					\
	void __hwasan_load##size##_noabort(unsigned long addr)		\
	{								\
		kasan_check_range(addr, size, false, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
	void __hwasan_store##size##_noabort(unsigned long addr)		\
	{								\
		kasan_check_range(addr, size, true, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_store##size##_noabort)

DEFINE_HWASAN_LOAD_STORE(1);
DEFINE_HWASAN_LOAD_STORE(2);
DEFINE_HWASAN_LOAD_STORE(4);
DEFINE_HWASAN_LOAD_STORE(8);
DEFINE_HWASAN_LOAD_STORE(16);

void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_loadN_noabort);

void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_storeN_noabort);

void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
	kasan_poison((void *)addr, size, tag, false);
}
EXPORT_SYMBOL(__hwasan_tag_memory);

/*
 * Record the stack trace of this free of the object; with
 * CONFIG_KASAN_SW_TAGS_IDENTIFY, also remember the pointer tag used for the
 * free so that the matching stack trace can be looked up later.
 */
void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	u8 idx = 0;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	idx = alloc_meta->free_track_idx;
	alloc_meta->free_pointer_tag[idx] = tag;
	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

	kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

/*
 * Return the free stack trace recorded for the given pointer tag; if no
 * recorded tag matches, fall back to a default slot.
 */
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
						void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	int i = 0;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return NULL;

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
		if (alloc_meta->free_pointer_tag[i] == tag)
			break;
	}
	if (i == KASAN_NR_FREE_STACKS)
		i = alloc_meta->free_track_idx;
#endif

	return &alloc_meta->free_track[i];
}
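
/*
 * Worked example (an illustrative sketch only, not part of the upstream
 * file). It assumes the 16-byte shadow granule of software tag-based KASAN,
 * outline compiler instrumentation, and made-up tag values:
 *
 *	char *p = kmalloc(64, GFP_KERNEL);
 *	// The returned pointer carries a tag from kasan_random_tag() in its
 *	// top byte, say 0x2a, and the four 16-byte shadow granules backing
 *	// the object are set to 0x2a.
 *
 *	p[70] = 1;
 *	// The compiler emits a call to __hwasan_store1_noabort(p + 70).
 *	// kasan_check_range() reads the shadow byte for the accessed granule,
 *	// which lies past the object and so almost certainly holds a
 *	// different tag; the mismatch makes it call kasan_report() for an
 *	// out-of-bounds write.
 */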