xref: /openbmc/linux/include/linux/kasan.h (revision 3252b1d8)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
20b24beccSAndrey Ryabinin #ifndef _LINUX_KASAN_H
30b24beccSAndrey Ryabinin #define _LINUX_KASAN_H
40b24beccSAndrey Ryabinin 
57a3b8353SPeter Collingbourne #include <linux/bug.h>
62db710ccSMarco Elver #include <linux/kernel.h>
734303244SAndrey Konovalov #include <linux/static_key.h>
80b24beccSAndrey Ryabinin #include <linux/types.h>
90b24beccSAndrey Ryabinin 
100b24beccSAndrey Ryabinin struct kmem_cache;
110b24beccSAndrey Ryabinin struct page;
12a5af5aa8SAndrey Ryabinin struct vm_struct;
135be9b730SMasami Hiramatsu struct task_struct;
140b24beccSAndrey Ryabinin 
150b24beccSAndrey Ryabinin #ifdef CONFIG_KASAN
160b24beccSAndrey Ryabinin 
17d5750edfSAndrey Konovalov #include <linux/linkage.h>
1865fddcfcSMike Rapoport #include <asm/kasan.h>
190b24beccSAndrey Ryabinin 
2083c4e7a0SPatricia Alfonso /* kasan_data struct is used in KUnit tests for KASAN expected failures */
2183c4e7a0SPatricia Alfonso struct kunit_kasan_expectation {
2283c4e7a0SPatricia Alfonso 	bool report_found;
2383c4e7a0SPatricia Alfonso };
2483c4e7a0SPatricia Alfonso 
25d5750edfSAndrey Konovalov #endif
26d5750edfSAndrey Konovalov 
27d5750edfSAndrey Konovalov #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
28d5750edfSAndrey Konovalov 
29d5750edfSAndrey Konovalov #include <linux/pgtable.h>
30d5750edfSAndrey Konovalov 
31d5750edfSAndrey Konovalov /* Software KASAN implementations use shadow memory. */
32d5750edfSAndrey Konovalov 
33d5750edfSAndrey Konovalov #ifdef CONFIG_KASAN_SW_TAGS
34a064cb00SAndrey Konovalov /* This matches KASAN_TAG_INVALID. */
35a064cb00SAndrey Konovalov #define KASAN_SHADOW_INIT 0xFE
36d5750edfSAndrey Konovalov #else
37d5750edfSAndrey Konovalov #define KASAN_SHADOW_INIT 0
38d5750edfSAndrey Konovalov #endif
39d5750edfSAndrey Konovalov 
4029970dc2SHailong Liu #ifndef PTE_HWTABLE_PTRS
4129970dc2SHailong Liu #define PTE_HWTABLE_PTRS 0
4229970dc2SHailong Liu #endif
4329970dc2SHailong Liu 
449577dd74SAndrey Konovalov extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
45cb32c9c5SDaniel Axtens extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
46cb32c9c5SDaniel Axtens extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
47cb32c9c5SDaniel Axtens extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
489577dd74SAndrey Konovalov extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
4969786cdbSAndrey Ryabinin 
509577dd74SAndrey Konovalov int kasan_populate_early_shadow(const void *shadow_start,
5169786cdbSAndrey Ryabinin 				const void *shadow_end);
5269786cdbSAndrey Ryabinin 
/*
 * Translate a kernel address to its KASAN shadow address: scale the address
 * down by KASAN_SHADOW_SCALE_SHIFT and rebase it at KASAN_SHADOW_OFFSET.
 */
530b24beccSAndrey Ryabinin static inline void *kasan_mem_to_shadow(const void *addr)
540b24beccSAndrey Ryabinin {
550b24beccSAndrey Ryabinin 	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
560b24beccSAndrey Ryabinin 		+ KASAN_SHADOW_OFFSET;
570b24beccSAndrey Ryabinin }
580b24beccSAndrey Ryabinin 
59d5750edfSAndrey Konovalov int kasan_add_zero_shadow(void *start, unsigned long size);
60d5750edfSAndrey Konovalov void kasan_remove_zero_shadow(void *start, unsigned long size);
61d5750edfSAndrey Konovalov 
62d73b4936SAndrey Konovalov /* Enable reporting bugs after kasan_disable_current() */
63d73b4936SAndrey Konovalov extern void kasan_enable_current(void);
64d73b4936SAndrey Konovalov 
65d73b4936SAndrey Konovalov /* Disable reporting bugs for current task */
66d73b4936SAndrey Konovalov extern void kasan_disable_current(void);
67d73b4936SAndrey Konovalov 
68d5750edfSAndrey Konovalov #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
69d5750edfSAndrey Konovalov 
/*
 * No-op stubs used when neither software KASAN mode (generic or SW_TAGS)
 * is enabled: there is no shadow memory to manage and no per-task report
 * enable/disable state.
 */
70d5750edfSAndrey Konovalov static inline int kasan_add_zero_shadow(void *start, unsigned long size)
71d5750edfSAndrey Konovalov {
72d5750edfSAndrey Konovalov 	return 0;
73d5750edfSAndrey Konovalov }
74d5750edfSAndrey Konovalov static inline void kasan_remove_zero_shadow(void *start,
75d5750edfSAndrey Konovalov 					unsigned long size)
76d5750edfSAndrey Konovalov {}
77d5750edfSAndrey Konovalov 
78d73b4936SAndrey Konovalov static inline void kasan_enable_current(void) {}
79d73b4936SAndrey Konovalov static inline void kasan_disable_current(void) {}
80d73b4936SAndrey Konovalov 
81d5750edfSAndrey Konovalov #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
82d5750edfSAndrey Konovalov 
8334303244SAndrey Konovalov #ifdef CONFIG_KASAN_HW_TAGS
84e86f8b09SAndrey Konovalov 
/*
 * With hardware tag-based KASAN, whether KASAN is enabled is decided at
 * runtime via this static key, so kasan_enabled() compiles to a patched
 * branch rather than a constant.
 */
8534303244SAndrey Konovalov DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
86e86f8b09SAndrey Konovalov 
8734303244SAndrey Konovalov static __always_inline bool kasan_enabled(void)
8834303244SAndrey Konovalov {
8934303244SAndrey Konovalov 	return static_branch_likely(&kasan_flag_enabled);
9034303244SAndrey Konovalov }
91e86f8b09SAndrey Konovalov 
/* HW_TAGS integrates memory initialization with tagging when enabled. */
921bb5eab3SAndrey Konovalov static inline bool kasan_has_integrated_init(void)
931bb5eab3SAndrey Konovalov {
941bb5eab3SAndrey Konovalov 	return kasan_enabled();
951bb5eab3SAndrey Konovalov }
961bb5eab3SAndrey Konovalov 
/* Real page alloc/free hooks, implemented out of line for HW_TAGS only. */
977a3b8353SPeter Collingbourne void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
987a3b8353SPeter Collingbourne void kasan_free_pages(struct page *page, unsigned int order);
997a3b8353SPeter Collingbourne 
100e86f8b09SAndrey Konovalov #else /* CONFIG_KASAN_HW_TAGS */
101e86f8b09SAndrey Konovalov 
/* Without HW_TAGS, "enabled" is simply whether KASAN is configured at all. */
10234303244SAndrey Konovalov static inline bool kasan_enabled(void)
10334303244SAndrey Konovalov {
1047a3b8353SPeter Collingbourne 	return IS_ENABLED(CONFIG_KASAN);
10534303244SAndrey Konovalov }
106e86f8b09SAndrey Konovalov 
1071bb5eab3SAndrey Konovalov static inline bool kasan_has_integrated_init(void)
1081bb5eab3SAndrey Konovalov {
1091bb5eab3SAndrey Konovalov 	return false;
1101bb5eab3SAndrey Konovalov }
1111bb5eab3SAndrey Konovalov 
/*
 * These stubs must never be reached: callers are expected to check
 * kasan_has_integrated_init() (constant false here) first, so any call
 * that survives dead-code elimination trips BUILD_BUG() at compile time.
 */
1127a3b8353SPeter Collingbourne static __always_inline void kasan_alloc_pages(struct page *page,
1137a3b8353SPeter Collingbourne 					      unsigned int order, gfp_t flags)
1147a3b8353SPeter Collingbourne {
1157a3b8353SPeter Collingbourne 	/* Only available for integrated init. */
1167a3b8353SPeter Collingbourne 	BUILD_BUG();
1177a3b8353SPeter Collingbourne }
1187a3b8353SPeter Collingbourne 
1197a3b8353SPeter Collingbourne static __always_inline void kasan_free_pages(struct page *page,
1207a3b8353SPeter Collingbourne 					     unsigned int order)
1217a3b8353SPeter Collingbourne {
1227a3b8353SPeter Collingbourne 	/* Only available for integrated init. */
1237a3b8353SPeter Collingbourne 	BUILD_BUG();
1247a3b8353SPeter Collingbourne }
1257a3b8353SPeter Collingbourne 
126e86f8b09SAndrey Konovalov #endif /* CONFIG_KASAN_HW_TAGS */
127e86f8b09SAndrey Konovalov 
1287a3b8353SPeter Collingbourne #ifdef CONFIG_KASAN
1297a3b8353SPeter Collingbourne 
/*
 * Per-kmem_cache KASAN state.
 * NOTE(review): the two offsets presumably locate KASAN's alloc/free
 * metadata within a slab object — confirm against mm/kasan before relying
 * on that reading here.
 */
1307a3b8353SPeter Collingbourne struct kasan_cache {
1317a3b8353SPeter Collingbourne 	int alloc_meta_offset;
1327a3b8353SPeter Collingbourne 	int free_meta_offset;
	/* set for caches created via kasan_cache_create_kmalloc() below */
1337a3b8353SPeter Collingbourne 	bool is_kmalloc;
1347a3b8353SPeter Collingbourne };
1357a3b8353SPeter Collingbourne 
/*
 * Each kasan_*() wrapper below checks kasan_enabled() before calling the
 * out-of-line __kasan_*() implementation, so the hooks cost at most a
 * static-key test (and nothing at all when KASAN is compiled out).
 */
136e86f8b09SAndrey Konovalov slab_flags_t __kasan_never_merge(void);
/* Slab flags that must prevent cache merging while KASAN is enabled. */
137e86f8b09SAndrey Konovalov static __always_inline slab_flags_t kasan_never_merge(void)
138e86f8b09SAndrey Konovalov {
139e86f8b09SAndrey Konovalov 	if (kasan_enabled())
140e86f8b09SAndrey Konovalov 		return __kasan_never_merge();
141e86f8b09SAndrey Konovalov 	return 0;
142e86f8b09SAndrey Konovalov }
14334303244SAndrey Konovalov 
14434303244SAndrey Konovalov void __kasan_unpoison_range(const void *addr, size_t size);
/* Mark [addr, addr + size) as accessible. */
14534303244SAndrey Konovalov static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
14634303244SAndrey Konovalov {
14734303244SAndrey Konovalov 	if (kasan_enabled())
14834303244SAndrey Konovalov 		__kasan_unpoison_range(addr, size);
14934303244SAndrey Konovalov }
15034303244SAndrey Konovalov 
1517a3b8353SPeter Collingbourne void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
/* Poison 2^order pages; @init also requests memory initialization. */
1527a3b8353SPeter Collingbourne static __always_inline void kasan_poison_pages(struct page *page,
1531bb5eab3SAndrey Konovalov 						unsigned int order, bool init)
15434303244SAndrey Konovalov {
15534303244SAndrey Konovalov 	if (kasan_enabled())
1567a3b8353SPeter Collingbourne 		__kasan_poison_pages(page, order, init);
15734303244SAndrey Konovalov }
15834303244SAndrey Konovalov 
1597a3b8353SPeter Collingbourne void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
/* Unpoison 2^order pages; @init also requests memory initialization. */
1607a3b8353SPeter Collingbourne static __always_inline void kasan_unpoison_pages(struct page *page,
1611bb5eab3SAndrey Konovalov 						 unsigned int order, bool init)
16234303244SAndrey Konovalov {
16334303244SAndrey Konovalov 	if (kasan_enabled())
1647a3b8353SPeter Collingbourne 		__kasan_unpoison_pages(page, order, init);
16534303244SAndrey Konovalov }
16634303244SAndrey Konovalov 
16634303244SAndrey Konovalov void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
16734303244SAndrey Konovalov 				slab_flags_t *flags);
/*
 * Cache-creation hook; may adjust the object *size and cache *flags to make
 * room for KASAN metadata (see struct kasan_cache above).
 */
16834303244SAndrey Konovalov static __always_inline void kasan_cache_create(struct kmem_cache *cache,
16934303244SAndrey Konovalov 				unsigned int *size, slab_flags_t *flags)
17034303244SAndrey Konovalov {
17134303244SAndrey Konovalov 	if (kasan_enabled())
17234303244SAndrey Konovalov 		__kasan_cache_create(cache, size, flags);
17334303244SAndrey Konovalov }
17434303244SAndrey Konovalov 
17592850134SAndrey Konovalov void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
/* Marks a cache as a kmalloc cache for KASAN's purposes. */
17692850134SAndrey Konovalov static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
17792850134SAndrey Konovalov {
17892850134SAndrey Konovalov 	if (kasan_enabled())
17992850134SAndrey Konovalov 		__kasan_cache_create_kmalloc(cache);
18092850134SAndrey Konovalov }
18192850134SAndrey Konovalov 
18234303244SAndrey Konovalov size_t __kasan_metadata_size(struct kmem_cache *cache);
/* Bytes of per-object KASAN metadata for @cache (0 when KASAN is off). */
18334303244SAndrey Konovalov static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
18434303244SAndrey Konovalov {
18534303244SAndrey Konovalov 	if (kasan_enabled())
18634303244SAndrey Konovalov 		return __kasan_metadata_size(cache);
18734303244SAndrey Konovalov 	return 0;
18834303244SAndrey Konovalov }
18934303244SAndrey Konovalov 
19034303244SAndrey Konovalov void __kasan_poison_slab(struct page *page);
/* Poison a freshly allocated slab page before objects are handed out. */
19134303244SAndrey Konovalov static __always_inline void kasan_poison_slab(struct page *page)
19234303244SAndrey Konovalov {
19334303244SAndrey Konovalov 	if (kasan_enabled())
19434303244SAndrey Konovalov 		__kasan_poison_slab(page);
19534303244SAndrey Konovalov }
19734303244SAndrey Konovalov 
19634303244SAndrey Konovalov void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
/* Make an object's payload accessible (e.g. before its constructor runs). */
19734303244SAndrey Konovalov static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
19834303244SAndrey Konovalov 							void *object)
19934303244SAndrey Konovalov {
20034303244SAndrey Konovalov 	if (kasan_enabled())
20134303244SAndrey Konovalov 		__kasan_unpoison_object_data(cache, object);
20234303244SAndrey Konovalov }
20334303244SAndrey Konovalov 
20434303244SAndrey Konovalov void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
/* Re-poison an object's payload so stray accesses are reported. */
20534303244SAndrey Konovalov static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
20634303244SAndrey Konovalov 							void *object)
20734303244SAndrey Konovalov {
20834303244SAndrey Konovalov 	if (kasan_enabled())
20934303244SAndrey Konovalov 		__kasan_poison_object_data(cache, object);
21034303244SAndrey Konovalov }
21134303244SAndrey Konovalov 
21234303244SAndrey Konovalov void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
21334303244SAndrey Konovalov 					  const void *object);
/*
 * Initialize KASAN's view of a new slab object. Returns the pointer the
 * caller must use from now on (may differ from @object in tagging modes);
 * with KASAN disabled it returns @object unchanged.
 */
21434303244SAndrey Konovalov static __always_inline void * __must_check kasan_init_slab_obj(
21534303244SAndrey Konovalov 				struct kmem_cache *cache, const void *object)
21634303244SAndrey Konovalov {
21734303244SAndrey Konovalov 	if (kasan_enabled())
21834303244SAndrey Konovalov 		return __kasan_init_slab_obj(cache, object);
21934303244SAndrey Konovalov 	return (void *)object;
22034303244SAndrey Konovalov }
22134303244SAndrey Konovalov 
222d57a964eSAndrey Konovalov bool __kasan_slab_free(struct kmem_cache *s, void *object,
223d57a964eSAndrey Konovalov 			unsigned long ip, bool init);
/*
 * Free-path hook. Returns true when KASAN takes ownership of the object
 * (so the caller must not free it now); false means proceed with the free.
 * _RET_IP_ records the caller for use in reports.
 */
224d57a964eSAndrey Konovalov static __always_inline bool kasan_slab_free(struct kmem_cache *s,
225d57a964eSAndrey Konovalov 						void *object, bool init)
22634303244SAndrey Konovalov {
22734303244SAndrey Konovalov 	if (kasan_enabled())
228d57a964eSAndrey Konovalov 		return __kasan_slab_free(s, object, _RET_IP_, init);
22934303244SAndrey Konovalov 	return false;
23034303244SAndrey Konovalov }
23334303244SAndrey Konovalov 
234200072ceSAndrey Konovalov void __kasan_kfree_large(void *ptr, unsigned long ip);
/* Hook for freeing a large (page-backed) kmalloc allocation. */
235200072ceSAndrey Konovalov static __always_inline void kasan_kfree_large(void *ptr)
236200072ceSAndrey Konovalov {
237200072ceSAndrey Konovalov 	if (kasan_enabled())
238200072ceSAndrey Konovalov 		__kasan_kfree_large(ptr, _RET_IP_);
239200072ceSAndrey Konovalov }
240200072ceSAndrey Konovalov 
241eeb3160cSAndrey Konovalov void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
/* Free-path hook for objects returned to a mempool rather than the slab. */
242027b37b5SAndrey Konovalov static __always_inline void kasan_slab_free_mempool(void *ptr)
243eeb3160cSAndrey Konovalov {
244eeb3160cSAndrey Konovalov 	if (kasan_enabled())
245027b37b5SAndrey Konovalov 		__kasan_slab_free_mempool(ptr, _RET_IP_);
246eeb3160cSAndrey Konovalov }
247eeb3160cSAndrey Konovalov 
24834303244SAndrey Konovalov void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
249da844b78SAndrey Konovalov 				       void *object, gfp_t flags, bool init);
/*
 * Allocation-path hook. Returns the pointer the caller must hand out
 * (may be retagged); with KASAN disabled it returns @object unchanged.
 */
25034303244SAndrey Konovalov static __always_inline void * __must_check kasan_slab_alloc(
251da844b78SAndrey Konovalov 		struct kmem_cache *s, void *object, gfp_t flags, bool init)
25234303244SAndrey Konovalov {
25334303244SAndrey Konovalov 	if (kasan_enabled())
254da844b78SAndrey Konovalov 		return __kasan_slab_alloc(s, object, flags, init);
25534303244SAndrey Konovalov 	return object;
25634303244SAndrey Konovalov }
25734303244SAndrey Konovalov 
25834303244SAndrey Konovalov void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
25934303244SAndrey Konovalov 				    size_t size, gfp_t flags);
/*
 * kmalloc()-path hook: records the requested @size for the object.
 * Returns the pointer to use; @object unchanged when KASAN is disabled.
 */
26034303244SAndrey Konovalov static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
26134303244SAndrey Konovalov 				const void *object, size_t size, gfp_t flags)
26234303244SAndrey Konovalov {
26334303244SAndrey Konovalov 	if (kasan_enabled())
26434303244SAndrey Konovalov 		return __kasan_kmalloc(s, object, size, flags);
26534303244SAndrey Konovalov 	return (void *)object;
26634303244SAndrey Konovalov }
26734303244SAndrey Konovalov 
26834303244SAndrey Konovalov void * __must_check __kasan_kmalloc_large(const void *ptr,
26934303244SAndrey Konovalov 					  size_t size, gfp_t flags);
/* Same as kasan_kmalloc() but for large, page-backed allocations. */
27034303244SAndrey Konovalov static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
27134303244SAndrey Konovalov 						      size_t size, gfp_t flags)
27234303244SAndrey Konovalov {
27334303244SAndrey Konovalov 	if (kasan_enabled())
27434303244SAndrey Konovalov 		return __kasan_kmalloc_large(ptr, size, flags);
27534303244SAndrey Konovalov 	return (void *)ptr;
27634303244SAndrey Konovalov }
27734303244SAndrey Konovalov 
27834303244SAndrey Konovalov void * __must_check __kasan_krealloc(const void *object,
27934303244SAndrey Konovalov 				     size_t new_size, gfp_t flags);
/* krealloc()-path hook; returns the pointer the caller must use. */
28034303244SAndrey Konovalov static __always_inline void * __must_check kasan_krealloc(const void *object,
28134303244SAndrey Konovalov 						 size_t new_size, gfp_t flags)
28234303244SAndrey Konovalov {
28334303244SAndrey Konovalov 	if (kasan_enabled())
28434303244SAndrey Konovalov 		return __kasan_krealloc(object, new_size, flags);
28534303244SAndrey Konovalov 	return (void *)object;
28634303244SAndrey Konovalov }
28734303244SAndrey Konovalov 
288611806b4SAndrey Konovalov /*
289611806b4SAndrey Konovalov  * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
290611806b4SAndrey Konovalov  * the hardware tag-based mode that doesn't rely on compiler instrumentation.
291611806b4SAndrey Konovalov  */
292611806b4SAndrey Konovalov bool __kasan_check_byte(const void *addr, unsigned long ip);
/* Returns false (and reports) if the byte at @addr is poisoned. */
293611806b4SAndrey Konovalov static __always_inline bool kasan_check_byte(const void *addr)
294611806b4SAndrey Konovalov {
295611806b4SAndrey Konovalov 	if (kasan_enabled())
296611806b4SAndrey Konovalov 		return __kasan_check_byte(addr, _RET_IP_);
297611806b4SAndrey Konovalov 	return true;
298611806b4SAndrey Konovalov }
299611806b4SAndrey Konovalov 
300611806b4SAndrey Konovalov 
/*
 * NOTE(review): these appear to toggle multi-shot error reporting (save the
 * old setting, restore it later) — presumably used by tests; confirm against
 * mm/kasan/report.c.
 */
301b0845ce5SMark Rutland bool kasan_save_enable_multi_shot(void);
302b0845ce5SMark Rutland void kasan_restore_multi_shot(bool enabled);
303b0845ce5SMark Rutland 
3040b24beccSAndrey Ryabinin #else /* CONFIG_KASAN */
3050b24beccSAndrey Ryabinin 
/*
 * Stub implementations for CONFIG_KASAN=n: every hook collapses to a no-op,
 * and the hooks that return a pointer simply return their argument so
 * callers behave identically with KASAN compiled out.
 */
306e86f8b09SAndrey Konovalov static inline slab_flags_t kasan_never_merge(void)
307e86f8b09SAndrey Konovalov {
308e86f8b09SAndrey Konovalov 	return 0;
309e86f8b09SAndrey Konovalov }
310cebd0eb2SAndrey Konovalov static inline void kasan_unpoison_range(const void *address, size_t size) {}
3117a3b8353SPeter Collingbourne static inline void kasan_poison_pages(struct page *page, unsigned int order,
3127a3b8353SPeter Collingbourne 				      bool init) {}
3137a3b8353SPeter Collingbourne static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
3147a3b8353SPeter Collingbourne 					bool init) {}
3157ed2f9e6SAlexander Potapenko static inline void kasan_cache_create(struct kmem_cache *cache,
316be4a7988SAlexey Dobriyan 				      unsigned int *size,
317d50112edSAlexey Dobriyan 				      slab_flags_t *flags) {}
31892850134SAndrey Konovalov static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
31934303244SAndrey Konovalov static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
3200316bec2SAndrey Ryabinin static inline void kasan_poison_slab(struct page *page) {}
3210316bec2SAndrey Ryabinin static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
3220316bec2SAndrey Ryabinin 					void *object) {}
3230316bec2SAndrey Ryabinin static inline void kasan_poison_object_data(struct kmem_cache *cache,
3240316bec2SAndrey Ryabinin 					void *object) {}
3250116523cSAndrey Konovalov static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
3260116523cSAndrey Konovalov 				const void *object)
3270116523cSAndrey Konovalov {
3280116523cSAndrey Konovalov 	return (void *)object;
3290116523cSAndrey Konovalov }
/* false == KASAN does not intercept the free; caller proceeds normally. */
330d57a964eSAndrey Konovalov static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
3310116523cSAndrey Konovalov {
33234303244SAndrey Konovalov 	return false;
3330116523cSAndrey Konovalov }
334200072ceSAndrey Konovalov static inline void kasan_kfree_large(void *ptr) {}
335027b37b5SAndrey Konovalov static inline void kasan_slab_free_mempool(void *ptr) {}
33634303244SAndrey Konovalov static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
337da844b78SAndrey Konovalov 				   gfp_t flags, bool init)
33834303244SAndrey Konovalov {
33934303244SAndrey Konovalov 	return object;
34034303244SAndrey Konovalov }
3410116523cSAndrey Konovalov static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
3420116523cSAndrey Konovalov 				size_t size, gfp_t flags)
3430116523cSAndrey Konovalov {
3440116523cSAndrey Konovalov 	return (void *)object;
3450116523cSAndrey Konovalov }
34634303244SAndrey Konovalov static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
34734303244SAndrey Konovalov {
34834303244SAndrey Konovalov 	return (void *)ptr;
34934303244SAndrey Konovalov }
3500116523cSAndrey Konovalov static inline void *kasan_krealloc(const void *object, size_t new_size,
3510116523cSAndrey Konovalov 				 gfp_t flags)
3520116523cSAndrey Konovalov {
3530116523cSAndrey Konovalov 	return (void *)object;
3540116523cSAndrey Konovalov }
/* true == byte is considered accessible when KASAN is compiled out. */
355611806b4SAndrey Konovalov static inline bool kasan_check_byte(const void *address)
356611806b4SAndrey Konovalov {
357611806b4SAndrey Konovalov 	return true;
358611806b4SAndrey Konovalov }
3599b75a867SAndrey Ryabinin 
3600b24beccSAndrey Ryabinin #endif /* CONFIG_KASAN */
3610b24beccSAndrey Ryabinin 
/* Unpoison a task's stack; only meaningful with CONFIG_KASAN_STACK. */
36202c58773SWalter Wu #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
363d56a9ef8SAndrey Konovalov void kasan_unpoison_task_stack(struct task_struct *task);
364d56a9ef8SAndrey Konovalov #else
365d56a9ef8SAndrey Konovalov static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
366d56a9ef8SAndrey Konovalov #endif
367d56a9ef8SAndrey Konovalov 
/*
 * Hooks available only in generic (CONFIG_KASAN_GENERIC) mode: cache
 * shrink/shutdown notifications and recording of auxiliary stack traces
 * for an object. NOTE(review): the _noalloc variant presumably avoids
 * memory allocation while recording — confirm against mm/kasan/generic.c.
 */
3682bd926b4SAndrey Konovalov #ifdef CONFIG_KASAN_GENERIC
3692bd926b4SAndrey Konovalov 
3702bd926b4SAndrey Konovalov void kasan_cache_shrink(struct kmem_cache *cache);
3712bd926b4SAndrey Konovalov void kasan_cache_shutdown(struct kmem_cache *cache);
37226e760c9SWalter Wu void kasan_record_aux_stack(void *ptr);
3737cb3007cSMarco Elver void kasan_record_aux_stack_noalloc(void *ptr);
3742bd926b4SAndrey Konovalov 
3752bd926b4SAndrey Konovalov #else /* CONFIG_KASAN_GENERIC */
3762bd926b4SAndrey Konovalov 
3772bd926b4SAndrey Konovalov static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
3782bd926b4SAndrey Konovalov static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
37926e760c9SWalter Wu static inline void kasan_record_aux_stack(void *ptr) {}
3807cb3007cSMarco Elver static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
3812bd926b4SAndrey Konovalov 
3822bd926b4SAndrey Konovalov #endif /* CONFIG_KASAN_GENERIC */
3832bd926b4SAndrey Konovalov 
3842e903b91SAndrey Konovalov #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
3853c9e3aa1SAndrey Konovalov 
/*
 * Strip the KASAN tag from a pointer via the architecture helper; in the
 * non-tagging build below this is the identity function.
 */
386c0054c56SAndrey Konovalov static inline void *kasan_reset_tag(const void *addr)
387c0054c56SAndrey Konovalov {
388c0054c56SAndrey Konovalov 	return (void *)arch_kasan_reset_tag(addr);
389c0054c56SAndrey Konovalov }
3903c9e3aa1SAndrey Konovalov 
39149c6631dSVincenzo Frascino /**
39249c6631dSVincenzo Frascino  * kasan_report - print a report about a bad memory access detected by KASAN
39349c6631dSVincenzo Frascino  * @addr: address of the bad access
39449c6631dSVincenzo Frascino  * @size: size of the bad access
39549c6631dSVincenzo Frascino  * @is_write: whether the bad access is a write or a read
39649c6631dSVincenzo Frascino  * @ip: instruction pointer for the accessibility check or the bad access itself
39749c6631dSVincenzo Frascino  */
3988cceeff4SWalter Wu bool kasan_report(unsigned long addr, size_t size,
39941eea9cdSAndrey Konovalov 		bool is_write, unsigned long ip);
40041eea9cdSAndrey Konovalov 
4012e903b91SAndrey Konovalov #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
4023c9e3aa1SAndrey Konovalov 
4033c9e3aa1SAndrey Konovalov static inline void *kasan_reset_tag(const void *addr)
4043c9e3aa1SAndrey Konovalov {
4053c9e3aa1SAndrey Konovalov 	return (void *)addr;
4063c9e3aa1SAndrey Konovalov }
4073c9e3aa1SAndrey Konovalov 
4082e903b91SAndrey Konovalov #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
4092e903b91SAndrey Konovalov 
4108f7b5054SVincenzo Frascino #ifdef CONFIG_KASAN_HW_TAGS
4118f7b5054SVincenzo Frascino 
4128f7b5054SVincenzo Frascino void kasan_report_async(void);
4138f7b5054SVincenzo Frascino 
4148f7b5054SVincenzo Frascino #endif /* CONFIG_KASAN_HW_TAGS */
4158f7b5054SVincenzo Frascino 
/* Per-mode initialization entry points; stubs when the mode is disabled. */
4162e903b91SAndrey Konovalov #ifdef CONFIG_KASAN_SW_TAGS
4172e903b91SAndrey Konovalov void __init kasan_init_sw_tags(void);
4182e903b91SAndrey Konovalov #else
4192e903b91SAndrey Konovalov static inline void kasan_init_sw_tags(void) { }
4202e903b91SAndrey Konovalov #endif
4212e903b91SAndrey Konovalov 
/* HW_TAGS has both a per-CPU and a global (__init) initialization step. */
4222e903b91SAndrey Konovalov #ifdef CONFIG_KASAN_HW_TAGS
4232e903b91SAndrey Konovalov void kasan_init_hw_tags_cpu(void);
4242e903b91SAndrey Konovalov void __init kasan_init_hw_tags(void);
4252e903b91SAndrey Konovalov #else
4262e903b91SAndrey Konovalov static inline void kasan_init_hw_tags_cpu(void) { }
4272e903b91SAndrey Konovalov static inline void kasan_init_hw_tags(void) { }
4282e903b91SAndrey Konovalov #endif
429080eb83fSAndrey Konovalov 
/*
 * Shadow management for vmalloc areas (CONFIG_KASAN_VMALLOC). The stubs
 * below make every hook a successful no-op when vmalloc shadow is not
 * supported.
 */
4303c5c3cfbSDaniel Axtens #ifdef CONFIG_KASAN_VMALLOC
4313b1a4a86SAndrey Konovalov 
432d98c9e83SAndrey Ryabinin int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
433d98c9e83SAndrey Ryabinin void kasan_poison_vmalloc(const void *start, unsigned long size);
434d98c9e83SAndrey Ryabinin void kasan_unpoison_vmalloc(const void *start, unsigned long size);
4353c5c3cfbSDaniel Axtens void kasan_release_vmalloc(unsigned long start, unsigned long end,
4363c5c3cfbSDaniel Axtens 			   unsigned long free_region_start,
4373c5c3cfbSDaniel Axtens 			   unsigned long free_region_end);
4383b1a4a86SAndrey Konovalov 
439*3252b1d8SKefeng Wang void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
440*3252b1d8SKefeng Wang 
4413b1a4a86SAndrey Konovalov #else /* CONFIG_KASAN_VMALLOC */
4423b1a4a86SAndrey Konovalov 
443d98c9e83SAndrey Ryabinin static inline int kasan_populate_vmalloc(unsigned long start,
444d98c9e83SAndrey Ryabinin 					unsigned long size)
4453c5c3cfbSDaniel Axtens {
4463c5c3cfbSDaniel Axtens 	return 0;
4473c5c3cfbSDaniel Axtens }
4483c5c3cfbSDaniel Axtens 
449d98c9e83SAndrey Ryabinin static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
450d98c9e83SAndrey Ryabinin { }
451d98c9e83SAndrey Ryabinin static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
452d98c9e83SAndrey Ryabinin { }
4533c5c3cfbSDaniel Axtens static inline void kasan_release_vmalloc(unsigned long start,
4543c5c3cfbSDaniel Axtens 					 unsigned long end,
4553c5c3cfbSDaniel Axtens 					 unsigned long free_region_start,
4563c5c3cfbSDaniel Axtens 					 unsigned long free_region_end) {}
4573b1a4a86SAndrey Konovalov 
458*3252b1d8SKefeng Wang static inline void kasan_populate_early_vm_area_shadow(void *start,
459*3252b1d8SKefeng Wang 						       unsigned long size)
460*3252b1d8SKefeng Wang { }
461*3252b1d8SKefeng Wang 
4623b1a4a86SAndrey Konovalov #endif /* CONFIG_KASAN_VMALLOC */
4633b1a4a86SAndrey Konovalov 
4640fea6e9aSAndrey Konovalov #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
4650fea6e9aSAndrey Konovalov 		!defined(CONFIG_KASAN_VMALLOC)
4663b1a4a86SAndrey Konovalov 
4673b1a4a86SAndrey Konovalov /*
4683b1a4a86SAndrey Konovalov  * These functions provide a special case to support backing module
4693b1a4a86SAndrey Konovalov  * allocations with real shadow memory. With KASAN vmalloc, the special
4703b1a4a86SAndrey Konovalov  * case is unnecessary, as the work is handled in the generic case.
4713b1a4a86SAndrey Konovalov  */
4723b1a4a86SAndrey Konovalov int kasan_module_alloc(void *addr, size_t size);
4733b1a4a86SAndrey Konovalov void kasan_free_shadow(const struct vm_struct *vm);
4743b1a4a86SAndrey Konovalov 
4750fea6e9aSAndrey Konovalov #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
4763b1a4a86SAndrey Konovalov 
/* No module-shadow special case needed: succeed / do nothing. */
4773b1a4a86SAndrey Konovalov static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
4783b1a4a86SAndrey Konovalov static inline void kasan_free_shadow(const struct vm_struct *vm) {}
4793b1a4a86SAndrey Konovalov 
4800fea6e9aSAndrey Konovalov #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
4813c5c3cfbSDaniel Axtens 
/*
 * NOTE(review): hook for faults at non-canonical addresses, only present
 * with inline instrumentation — presumably used by fault handlers to hint
 * that the address may be a wild KASAN shadow access; confirm at call sites.
 */
4822f004eeaSJann Horn #ifdef CONFIG_KASAN_INLINE
4832f004eeaSJann Horn void kasan_non_canonical_hook(unsigned long addr);
4842f004eeaSJann Horn #else /* CONFIG_KASAN_INLINE */
4852f004eeaSJann Horn static inline void kasan_non_canonical_hook(unsigned long addr) { }
4862f004eeaSJann Horn #endif /* CONFIG_KASAN_INLINE */
4872f004eeaSJann Horn 
4880b24beccSAndrey Ryabinin #endif /* LINUX_KASAN_H */
489