/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/*
 * The kunit_kasan_expectation struct (registered as the "kasan_data" KUnit
 * resource) is used in KUnit tests for KASAN expected failures.
 */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};
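
/*
 * Hedged usage sketch: the KASAN KUnit tests register an instance of this
 * struct as a KUnit resource, set report_expected before an access that
 * should fault, and check that the report machinery set report_found. The
 * exact macro lives in the KASAN test suite; roughly:
 *
 *	struct kunit_kasan_expectation fail_data = { .report_expected = true };
 *	... perform the bad access ...
 *	KUNIT_EXPECT_EQ(test, fail_data.report_expected, fail_data.report_found);
 */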

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
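
/*
 * Worked example: with generic KASAN, KASAN_SHADOW_SCALE_SHIFT is 3, so one
 * shadow byte tracks an 8-byte granule and the shadow of @addr lives at
 * (addr >> 3) + KASAN_SHADOW_OFFSET; software tag-based KASAN uses a shift
 * of 4 (16-byte granules). KASAN_SHADOW_OFFSET is an arch-chosen constant
 * (e.g. 0xdffffc0000000000 on x86-64).
 */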

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
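
/*
 * Hedged usage sketch: code that must touch memory KASAN considers poisoned
 * can suppress reports for the current task (in the generic mode this is a
 * per-task depth counter, so the calls nest):
 *
 *	kasan_disable_current();
 *	... access that would otherwise trigger a report ...
 *	kasan_enable_current();
 */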

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */
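
/*
 * Every kasan_<hook>() wrapper below follows the same pattern: check
 * kasan_enabled() and forward to the corresponding __kasan_<hook>()
 * implementation. Under CONFIG_KASAN_HW_TAGS, kasan_enabled() is a static
 * key, so a kernel booted with the hooks disabled pays only a patched
 * branch per call site; in the software modes the check is constant-true
 * and the compiler folds it away.
 */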

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
				slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
						unsigned long ip)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, ip);
	return false;
}
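
/*
 * Hedged caller sketch: a true return value means KASAN has taken over the
 * object (in the generic mode it may sit in the quarantine), so the slab
 * allocator should skip the actual free:
 *
 *	if (kasan_slab_free(s, object, _RET_IP_))
 *		return;	// freed later, e.g. when evicted from quarantine
 */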

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, ip);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}
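
/*
 * Note on kasan_kmalloc() above (a hedged description, not a contract): it
 * narrows the accessible region of the object to @size and poisons the
 * remainder as a redzone; under the tag-based modes the returned pointer
 * may carry a fresh tag, so callers must use the return value rather than
 * the pointer they passed in.
 */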

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, ip);
}

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
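
/*
 * The tag-based modes store a tag in the otherwise-unused top byte of the
 * pointer (on arm64, via Top Byte Ignore). kasan_reset_tag() returns the
 * untagged address; a hedged example, comparing pointers that may carry
 * different tags:
 *
 *	if (kasan_reset_tag(a) == kasan_reset_tag(b))
 *		... same underlying address ...
 */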

bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
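
/*
 * Rough call flow (a hedged sketch, not a contract): the vmalloc core
 * populates shadow for a new area with kasan_populate_vmalloc(), toggles
 * accessibility with kasan_unpoison_vmalloc()/kasan_poison_vmalloc(), and
 * calls kasan_release_vmalloc() when vmap areas are purged so the shadow
 * backing [start, end) within the freed region can itself be freed.
 */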

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */