// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core software tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static DEFINE_PER_CPU(u32, prng_state);

void __init kasan_init_sw_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = (u32)get_cycles();

	kasan_init_tags();

	pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
		kasan_stack_collection_enabled() ? "on" : "off");
}

/*
 * If a preemption happens between this_cpu_read and this_cpu_write, the only
 * side effect is that we'll give the same tag to a few objects allocated in
 * different contexts. Since tag-based KASAN is meant to be a probabilistic
 * bug-detection debug feature, this doesn't have a significant negative
 * impact.
 *
 * Ideally the tags would use strong randomness to prevent any attempts to
 * predict them during explicit exploit attempts. But strong randomness is
 * expensive, and we made an intentional trade-off to use a PRNG. This
 * non-atomic RMW sequence in fact has a positive effect, since interrupts
 * that randomly skew the PRNG at unpredictable points only do good.
 */
u8 kasan_random_tag(void)
{
	u32 state = this_cpu_read(prng_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(prng_state, state);

	return (u8)(state % (KASAN_TAG_MAX + 1));
}

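/*
 * Check that the pointer tag of @addr matches the shadow memory tag for
 * every granule covered by [addr, addr + size). Returns true if the access
 * is valid, false if a KASAN report was produced.
 */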
bool kasan_check_range(unsigned long addr, size_t size, bool write,
			unsigned long ret_ip)
{
	u8 tag;
	u8 *shadow_first, *shadow_last, *shadow;
	void *untagged_addr;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	tag = get_tag((const void *)addr);

	/*
	 * Ignore accesses for pointers tagged with 0xff (native kernel
	 * pointer tag) to suppress false positives caused by kmap.
	 *
	 * Some kernel code was written to account for archs that don't keep
	 * high memory mapped all the time, but rather map and unmap particular
	 * pages when needed. Instead of storing a pointer to the kernel memory,
	 * this code saves the address of the page structure and offset within
	 * that page for later use. Those pages are then mapped and unmapped
	 * with kmap/kunmap when necessary and virt_to_page is used to get the
	 * virtual address of the page. For arm64 (which keeps the high memory
	 * mapped all the time), kmap is turned into a page_address call.
	 *
	 * The issue is that with the use of the page_address + virt_to_page
	 * sequence the top byte value of the original pointer gets lost (gets
	 * set to KASAN_TAG_KERNEL (0xFF)).
	 */
	if (tag == KASAN_TAG_KERNEL)
		return true;

	untagged_addr = kasan_reset_tag((const void *)addr);
	if (unlikely(!addr_has_metadata(untagged_addr)))
		return !kasan_report(addr, size, write, ret_ip);
	shadow_first = kasan_mem_to_shadow(untagged_addr);
	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
		if (*shadow != tag)
			return !kasan_report(addr, size, write, ret_ip);
	}

	return true;
}

/*
 * Return true if a one-byte access to @addr is valid: the address must have
 * shadow metadata, and the pointer tag must either be the native kernel tag
 * or match the shadow byte.
 */
bool kasan_byte_accessible(const void *addr)
{
	u8 tag = get_tag(addr);
	void *untagged_addr = kasan_reset_tag(addr);
	u8 shadow_byte;

	if (!addr_has_metadata(untagged_addr))
		return false;

	shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(untagged_addr));
	return tag == KASAN_TAG_KERNEL || tag == shadow_byte;
}

/*
 * Fixed-size check routines that the compiler emits calls to before loads
 * and stores when outline instrumentation is used.
 */
#define DEFINE_HWASAN_LOAD_STORE(size)					\
	void __hwasan_load##size##_noabort(unsigned long addr)		\
	{								\
		kasan_check_range(addr, size, false, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
	void __hwasan_store##size##_noabort(unsigned long addr)		\
	{								\
		kasan_check_range(addr, size, true, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_store##size##_noabort)

DEFINE_HWASAN_LOAD_STORE(1);
DEFINE_HWASAN_LOAD_STORE(2);
DEFINE_HWASAN_LOAD_STORE(4);
DEFINE_HWASAN_LOAD_STORE(8);
DEFINE_HWASAN_LOAD_STORE(16);

/* Variable-size variants of the check routines above. */
void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_loadN_noabort);

void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_storeN_noabort);

/* Set the shadow memory of [addr, addr + size) to @tag. */
void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
	kasan_poison((void *)addr, size, tag, false);
}
EXPORT_SYMBOL(__hwasan_tag_memory);

/*
 * Report a tag mismatch: the low four bits of @access_info encode
 * log2(access size), bit 4 encodes read vs. write.
 */
void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
			unsigned long ret_ip)
{
	kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
			ret_ip);
}
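/*
 * A sketch of how these hooks get used (illustration only, assuming outline
 * instrumentation): the compiler inserts a check call before each memory
 * access, so a plain four-byte load
 *
 *	val = *(u32 *)ptr;
 *
 * is compiled as if it were preceded by
 *
 *	__hwasan_load4_noabort((unsigned long)ptr);
 *
 * which lands in kasan_check_range() above with size == 4 and
 * write == false.
 */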