/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					    unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
					      unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
					     unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}
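
/*
 * Illustrative sketch, not part of this header: the page allocation hooks
 * above are expected to be paired inside the page allocator itself (e.g. on
 * its post-allocation and pre-free paths), roughly in this order. The
 * variable names below are assumptions made for illustration only.
 *
 *	struct page *page = ...pages taken from the free lists...;
 *
 *	kasan_alloc_pages(page, order);		unpoison (and, for HW tags,
 *						assign tags to) the new pages
 *	...pages are in use...
 *	kasan_free_pages(page, order);		poison again before the pages
 *						return to the free lists
 */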

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
					       unsigned int *size,
					       slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}
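
/*
 * Illustrative sketch, under the assumption that the slab allocator invokes
 * these hooks from its usual post-alloc and pre-free paths (the exact callers
 * in mm/slub.c and mm/slab.c may differ between kernel versions):
 *
 *	Allocation:
 *		object = kasan_slab_alloc(s, object, flags);
 *		object = kasan_kmalloc(s, object, size, flags);   kmalloc caches
 *
 *	Free: a true return value means KASAN has taken over the object
 *	(for instance to quarantine it) or detected an invalid free, so the
 *	slab must not release the object right away:
 *		if (kasan_slab_free(s, object))
 *			return;
 *		...actually free the object...
 */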

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
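
/*
 * Illustrative sketch, not an exhaustive list of callers:
 * kasan_record_aux_stack() lets a subsystem that defers work on an object
 * save an extra stack trace in the object's generic-mode KASAN metadata,
 * so that a later use-after-free report can show where the deferral
 * happened. The function below is hypothetical; call_rcu(), for example,
 * already records an aux stack for the object it is handed.
 *
 *	void example_queue_for_deferred_free(struct rcu_head *head)
 *	{
 *		kasan_record_aux_stack(head);	remember this call site
 *		call_rcu(head, example_rcu_free_cb);
 *	}
 */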

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		  bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* LINUX_KASAN_H */
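
/*
 * Illustrative sketch, assuming the typical way an architecture's
 * module_alloc() uses kasan_module_alloc()/kasan_free_shadow() when
 * CONFIG_KASAN_VMALLOC is not enabled: module memory is vmalloc'ed first,
 * then backed with real shadow, and the allocation is undone on failure.
 *
 *	p = __vmalloc_node_range(size, ...);
 *	if (p && kasan_module_alloc(p, size) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 *	return p;
 *
 * kasan_free_shadow() is the teardown counterpart, invoked when such a
 * vm_struct is released.
 */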