/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). For more info please see
 * Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef MM_KFENCE_KFENCE_H
#define MM_KFENCE_KFENCE_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "../slab.h" /* for struct kmem_cache */

/*
 * Get the canary byte pattern for @addr. The pattern varies with the lower
 * 3 bits of the address, so that corruptions written with similar constants
 * are detected with higher probability.
 */
#define KFENCE_CANARY_PATTERN(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))
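/*
 * Illustrative sketch (not part of the KFENCE API): one way the pattern
 * above could be used to fill and verify a canary region. canary_fill()
 * and canary_intact() are hypothetical helpers for demonstration only;
 * the real fill/check logic lives in mm/kfence/core.c.
 */
static inline void canary_fill(unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* Each byte gets a pattern derived from its own address. */
	for (addr = start; addr < end; addr++)
		*(u8 *)addr = KFENCE_CANARY_PATTERN(addr);
}

static inline bool canary_intact(unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* A mismatch means some write strayed into the redzone. */
	for (addr = start; addr < end; addr++)
		if (*(u8 *)addr != KFENCE_CANARY_PATTERN(addr))
			return false;
	return true;
}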
/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64

/* KFENCE object states. */
enum kfence_object_state {
	KFENCE_OBJECT_UNUSED,		/* Object is unused. */
	KFENCE_OBJECT_ALLOCATED,	/* Object is currently allocated. */
	KFENCE_OBJECT_FREED,		/* Object was allocated, and then freed. */
};

/* Alloc/free tracking information. */
struct kfence_track {
	pid_t pid;
	int cpu;
	u64 ts_nsec;
	int num_stack_entries;
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
};

/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
	struct list_head list;		/* Freelist node; access under kfence_freelist_lock. */
	struct rcu_head rcu_head;	/* For delayed freeing. */

	/*
	 * Lock protecting the data below; needed to keep it consistent, since
	 * __kfence_alloc(), __kfence_free() and kfence_handle_page_fault() may
	 * execute concurrently. However, note that we cannot grab the same
	 * metadata off the freelist twice, and multiple __kfence_alloc() cannot
	 * run concurrently on the same metadata.
	 */
	raw_spinlock_t lock;

	/* The current state of the object; see above. */
	enum kfence_object_state state;

	/*
	 * Allocated object address; cannot be calculated from size, because of
	 * alignment requirements.
	 *
	 * Invariant: ALIGN_DOWN(addr, PAGE_SIZE) is constant.
	 */
	unsigned long addr;

	/* The size of the original allocation. */
	size_t size;

	/*
	 * The kmem_cache of the last allocation; NULL if never allocated or if
	 * the cache has already been destroyed.
	 */
	struct kmem_cache *cache;

	/*
	 * In case of an invalid access, the page that was unprotected; we
	 * optimistically only store one address.
	 */
	unsigned long unprotected_page;

	/* Allocation and free stack information. */
	struct kfence_track alloc_track;
	struct kfence_track free_track;
	/* For updating alloc_covered on frees. */
	u32 alloc_stack_hash;
#ifdef CONFIG_MEMCG
	struct obj_cgroup *objcg;
#endif
};

extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* KFENCE error types for report generation. */
enum kfence_error_type {
	KFENCE_ERROR_OOB,		/* Detected an out-of-bounds access. */
	KFENCE_ERROR_UAF,		/* Detected a use-after-free access. */
	KFENCE_ERROR_CORRUPTION,	/* Detected a memory corruption on free. */
	KFENCE_ERROR_INVALID,		/* Invalid access of unknown type. */
	KFENCE_ERROR_INVALID_FREE,	/* Invalid free. */
};

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type);

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);

#endif /* MM_KFENCE_KFENCE_H */
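/*
 * Illustrative sketch (outside the header proper): one plausible way a
 * struct kfence_track could be populated on allocation or free.
 * track_record() is a hypothetical helper for demonstration; it assumes
 * <linux/stacktrace.h>, <linux/smp.h> and <linux/sched/clock.h> are
 * available. The actual bookkeeping is done in mm/kfence/core.c.
 */
static void track_record(struct kfence_track *track)
{
	/* Capture at most KFENCE_STACK_DEPTH entries, skipping this frame. */
	track->num_stack_entries = stack_trace_save(track->stack_entries,
						    KFENCE_STACK_DEPTH, 1);
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock();
}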