xref: /openbmc/linux/include/linux/kasan.h (revision 1fe3a33b)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* Used by KUnit tests to check that an expected KASAN report has been produced. */
struct kunit_kasan_expectation {
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

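/*
 * Each shadow byte tracks one 2^KASAN_SHADOW_SCALE_SHIFT-byte granule of
 * real memory: 8 bytes for generic KASAN, 16 bytes for software tag-based
 * KASAN. With an 8-byte granule, for example, addr and addr + 7 map to the
 * same shadow byte.
 */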
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

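/*
 * Hardware tag-based KASAN is only switched on during early boot, and only
 * when the CPU and the command line allow it, so a static key keeps the
 * hooks cheap when it stays disabled.
 */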
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_hw_tags_enabled(void)
{
	return kasan_enabled();
}

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_hw_tags_enabled(void)
{
	return false;
}

static __always_inline void kasan_alloc_pages(struct page *page,
					      unsigned int order, gfp_t flags)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
					     unsigned int order)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */

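/*
 * "Integrated init" means KASAN zero-initializes memory while assigning
 * tags, so callers can leave both the init and the poison/unpoison steps
 * to kasan_alloc_pages()/kasan_free_pages() instead of zeroing separately.
 */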
static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

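/*
 * Per-cache KASAN state: the offsets at which allocation and free metadata
 * are stored for each object, and whether the cache is a kmalloc cache.
 */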
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

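/*
 * The hooks below follow one pattern: a __kasan_*() implementation in
 * mm/kasan/ does the real work, and an __always_inline kasan_*() wrapper
 * checks kasan_enabled() first. With CONFIG_KASAN_HW_TAGS the check is a
 * static branch, so a kernel booted with KASAN disabled pays almost
 * nothing for these calls; for the other modes it folds to a constant.
 */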
slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
				slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

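/*
 * Returns true when the object must not be passed on to the allocator's
 * freeing path, either because KASAN is holding on to it (for example in
 * the generic mode's quarantine) or because the free was found to be
 * invalid.
 */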
bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

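/*
 * On the allocation path the slab allocator first runs kasan_slab_alloc()
 * on the raw object; kmalloc()-style interfaces then also run
 * kasan_kmalloc() so that the unused tail beyond the requested size stays
 * poisoned and out-of-bounds accesses past it are reported.
 */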
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

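/*
 * kasan_check_byte() is used, for example, by ksize(): the first byte of
 * the object is checked so that a use-after-free is reported before the
 * whole allocation is unpoisoned for the caller.
 */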

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
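/*
 * Record an auxiliary stack trace (for example where an RCU callback or a
 * work item was queued) in the object's allocation metadata so it shows up
 * in later reports for that object. The _noalloc variant never allocates
 * memory for the stack depot, so it can be called from contexts where
 * allocation is not allowed.
 */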
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

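/*
 * The tag-based modes keep a tag in the top byte of each pointer;
 * kasan_reset_tag() returns the address with the tag removed, which is
 * needed before operations that expect an untagged address, such as
 * pointer comparisons or virt-to-phys translation. Roughly:
 *
 *	void *untagged = kasan_reset_tag(tagged_ptr);
 */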
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

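/*
 * With CONFIG_KASAN_VMALLOC, shadow memory for vmalloc areas is provided
 * on demand: kasan_populate_vmalloc() backs a new region with shadow
 * pages, the poison/unpoison helpers mark it on allocation and free, and
 * kasan_release_vmalloc() returns the shadow once the surrounding free
 * region no longer needs it.
 */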
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
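/*
 * With inline instrumentation a wild access may fault on the shadow
 * address computed from a garbage pointer before any report is printed;
 * the fault handler calls this hook so the oops can note which memory
 * access the shadow address probably corresponds to.
 */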
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */