/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

struct kasan_cache {
#ifdef CONFIG_KASAN_GENERIC
	int alloc_meta_offset;
	int free_meta_offset;
#endif
	bool is_kmalloc;
};

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}
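
/*
 * Illustrative sketch, not part of this header: a page allocator is expected
 * to bracket page lifetimes with the two hooks above, roughly as below. The
 * example_* helpers are hypothetical; real call sites also derive the 'init'
 * argument from the init_on_alloc/init_on_free policy instead of passing
 * false unconditionally.
 *
 *	static void example_post_alloc(struct page *page, unsigned int order)
 *	{
 *		kasan_unpoison_pages(page, order, false);  // pages become accessible
 *	}
 *
 *	static void example_pre_free(struct page *page, unsigned int order)
 *	{
 *		kasan_poison_pages(page, order, false);    // stale accesses get reported
 *	}
 */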

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
							size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}
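
/*
 * Illustrative sketch, not part of this header: a slab allocator is expected
 * to thread objects through the hooks above. Two points matter: the pointer
 * returned by kasan_slab_alloc()/kasan_kmalloc() may carry a new tag and is
 * the one that must be handed out, and kasan_slab_free() returning true means
 * KASAN kept the object (e.g. in quarantine), so it must not be freed now.
 * The example_* helpers below are hypothetical.
 *
 *	void *example_alloc(struct kmem_cache *s, gfp_t flags, bool init)
 *	{
 *		void *obj = example_take_from_freelist(s);
 *
 *		return kasan_slab_alloc(s, obj, flags, init);
 *	}
 *
 *	void example_free(struct kmem_cache *s, void *obj, bool init)
 *	{
 *		if (kasan_slab_free(s, obj, init))
 *			return;		// deferred by KASAN, e.g. quarantined
 *		example_return_to_freelist(s, obj);
 *	}
 */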

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size,
						     kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */
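
/*
 * Illustrative note on the shadow mapping computed by kasan_mem_to_shadow()
 * above (software modes only; the figures below assume the common
 * KASAN_SHADOW_SCALE_SHIFT of 3, i.e. one shadow byte per 8 bytes of memory):
 *
 *	shadow_addr = (addr >> 3) + KASAN_SHADOW_OFFSET;
 *
 * In generic mode a shadow byte of 0 means all 8 covered bytes are
 * accessible, 1..7 mean only the first N bytes are accessible, and negative
 * values are poison markers (redzones, freed memory). In software tag-based
 * mode the shadow byte instead holds the memory tag that is compared against
 * the tag embedded in the pointer.
 */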