/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* The kunit_kasan_expectation struct is used by KUnit tests to track expected KASAN failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}

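/*
 * Example (an illustrative sketch, not part of this header): the kasan_*()
 * wrappers above reduce to a static-branch test, so a hypothetical caller
 * may invoke them unconditionally. With CONFIG_KASAN_HW_TAGS the hook is
 * skipped until kasan_flag_enabled is switched on during early boot; in
 * the other modes kasan_enabled() is constant true:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, order);
 *
 *	if (page)
 *		kasan_alloc_pages(page, order);	// near-zero cost when off
 */
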
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
						unsigned long ip)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, ip);
	return false;
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, ip);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

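/*
 * Example (an illustrative sketch of a simplified slab path, not part of
 * this header): the allocation hooks above are __must_check because the
 * tag-based modes may return the object with a new top-byte tag, and a
 * true return from kasan_slab_free() means KASAN took over the free
 * (e.g. via the generic mode's quarantine), so the caller must stop:
 *
 *	object = kasan_slab_alloc(s, object, flags);	// may retag
 *	...
 *	if (kasan_slab_free(s, object, _RET_IP_))
 *		return;				// freeing deferred by KASAN
 */
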
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, ip);
}

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

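/*
 * Example (an illustrative sketch, not part of this header): under the
 * tag-based modes two pointers to the same object can carry different
 * top-byte tags, so code that compares or hashes raw addresses should
 * strip the tag with kasan_reset_tag() first:
 *
 *	if (kasan_reset_tag(ptr) == kasan_reset_tag(base))
 *		...				// same underlying address
 */
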
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */
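
/*
 * Example (an illustrative sketch, not part of this header): with a
 * software KASAN mode and no CONFIG_KASAN_VMALLOC, a hypothetical module
 * loader backs a fresh executable mapping with real shadow memory up
 * front and discards the mapping if that fails; the shadow is torn down
 * later via kasan_free_shadow():
 *
 *	void *p = module_alloc(size);		// hypothetical caller
 *
 *	if (p && kasan_module_alloc(p, size)) {
 *		vfree(p);			// shadow allocation failed
 *		return NULL;
 *	}
 */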