/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kunit_kasan_expectation is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};
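
/*
 * Illustrative usage: a test sets .report_expected, triggers the bad
 * access, and then checks .report_found; the KASAN KUnit tests wrap
 * this pattern in the KUNIT_EXPECT_KASAN_FAIL() helper.
 */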

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif
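
/*
 * Newly mapped shadow is filled with KASAN_SHADOW_INIT: 0 marks memory
 * as accessible under generic KASAN, while 0xFE (KASAN_TAG_INVALID) is
 * a tag that no legitimate pointer can carry under SW_TAGS.
 */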

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
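
/*
 * Worked example (illustrative): generic KASAN uses
 * KASAN_SHADOW_SCALE_SHIFT == 3, so one shadow byte covers an 8-byte
 * granule and kasan_mem_to_shadow(addr) == kasan_mem_to_shadow(addr + 7).
 * SW_TAGS uses a shift of 4, i.e. 16-byte granules.
 */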

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
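
/*
 * Illustrative pattern for code that must deliberately touch memory
 * KASAN considers poisoned:
 *
 *	kasan_disable_current();
 *	...access the poisoned object...
 *	kasan_enable_current();
 *
 * The generic mode implements this as a per-task depth counter, so the
 * calls nest and reports stay off until every disable has been paired.
 */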

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};
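
/*
 * Illustrative: alloc_meta_offset/free_meta_offset give the byte
 * offsets of KASAN's per-object metadata within a slab object, and
 * is_kmalloc marks kmalloc caches so poisoning can honour the exact
 * size the caller requested rather than the full object size.
 */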

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_has_integrated_init(void)
{
	return kasan_enabled();
}
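
/*
 * HW_TAGS can be disabled on the kernel command line, so every hook
 * below is gated on kasan_enabled(); the static branch is patched once
 * during boot, making the disabled case effectively free.
 */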

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

static inline bool kasan_has_integrated_init(void)
{
	return false;
}

#endif /* CONFIG_KASAN_HW_TAGS */

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}
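
/*
 * Illustrative: the slab allocator merges compatible caches, but a
 * cache laid out with KASAN metadata must not merge with one without
 * it; kasan_never_merge() returns the slab flags (e.g. SLAB_KASAN)
 * that should prevent such merging.
 */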

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order, init);
}

void __kasan_free_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order, init);
}
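
/*
 * Sketch of the page allocator contract: kasan_alloc_pages() unpoisons
 * (and, when init is true, also initializes) the freshly allocated
 * pages, while kasan_free_pages() re-poisons them so use-after-free
 * bugs are caught on the next instrumented access.
 */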

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
				slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}
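
/*
 * Rough object life cycle (illustrative): kasan_poison_slab() poisons
 * a whole new slab page, kasan_init_slab_obj() prepares per-object
 * metadata, and constructors run bracketed by
 * kasan_unpoison_object_data()/kasan_poison_object_data() so only the
 * constructor may touch the not-yet-allocated object.
 */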

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
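
/*
 * A true return from kasan_slab_free() means KASAN has taken ownership
 * of the object (e.g. placed it in the generic-mode quarantine), and
 * the slab allocator must skip the immediate free; the object is given
 * back later, when the quarantine drains.
 */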

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}
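
/*
 * Worked example (illustrative, generic mode with 8-byte granules):
 * for kmalloc(24) served from the 32-byte cache, kasan_kmalloc()
 * unpoisons bytes 0-23 and leaves bytes 24-31 poisoned as a redzone,
 * so an access at offset 24 is reported even though it still lies
 * inside the slab object.
 */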

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
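
/*
 * Illustrative: kasan_check_byte() is an explicit validity probe;
 * ksize() uses it so that querying the size of an already-freed object
 * produces a report instead of quietly succeeding. It returns false,
 * after reporting, if the byte is not accessible.
 */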

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline bool kasan_has_integrated_init(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
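
/*
 * Illustrative: kasan_record_aux_stack() saves the current stack into
 * the object's metadata as an auxiliary trace, so a report can also
 * show where, e.g., an RCU callback or work item was queued, not only
 * where the object was allocated and freed.
 */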

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
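
/*
 * Illustrative: the tag-based modes store a tag in the unused top byte
 * of each pointer; kasan_reset_tag() returns the address with those
 * bits cleared, which is needed before comparing or doing arithmetic
 * on pointers that may carry different tags.
 */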

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
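
/*
 * Sketch of the flow (illustrative): kasan_populate_vmalloc() backs a
 * new vmalloc region with real shadow pages, the poison/unpoison
 * helpers mark the region as it is handed out and returned, and
 * kasan_release_vmalloc() frees shadow that no longer backs any live
 * mapping within [free_region_start, free_region_end).
 */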

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
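
/*
 * Illustrative: without CONFIG_KASAN_VMALLOC, module mappings have no
 * pre-populated shadow, so kasan_module_alloc() allocates dedicated
 * shadow for the module region and kasan_free_shadow() releases it
 * when the mapping is torn down.
 */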

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */