/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

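/*
 * Illustrative sketch (not part of this header): a vmalloc-style caller
 * is expected to OR these flags together before unpoisoning the mapping.
 * The condition names below are assumptions made for illustration:
 *
 *	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_VM_ALLOC;
 *
 *	if (want_init)			// memory should be initialized
 *		kasan_flags |= KASAN_VMALLOC_INIT;
 *	if (prot_is_page_kernel)	// default kernel protections
 *		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
 *	addr = kasan_unpoison_vmalloc(addr, size, kasan_flags);
 */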
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
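
/*
 * Worked example (a sketch; the constants are per-architecture): with
 * generic KASAN, KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte
 * describes an 8-byte granule of kernel memory:
 *
 *	shadow = (addr >> 3) + KASAN_SHADOW_OFFSET;
 *
 * Two addresses within the same 8-byte granule therefore map to the
 * same shadow byte.
 */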

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
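
/*
 * Usage sketch (illustrative): wrap an access that is known to trip
 * KASAN but is deliberate, so that no report is printed for it:
 *
 *	kasan_disable_current();
 *	value = READ_ONCE(*racy_or_already_freed_ptr);
 *	kasan_enable_current();
 *
 * Calls nest per task; each disable must be paired with an enable.
 */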

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}
98 #ifdef CONFIG_KASAN
99 
100 struct kasan_cache {
101 #ifdef CONFIG_KASAN_GENERIC
102 	int alloc_meta_offset;
103 	int free_meta_offset;
104 #endif
105 	bool is_kmalloc;
106 };
107 
108 void __kasan_unpoison_range(const void *addr, size_t size);
109 static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
110 {
111 	if (kasan_enabled())
112 		__kasan_unpoison_range(addr, size);
113 }
114 
115 void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
116 static __always_inline void kasan_poison_pages(struct page *page,
117 						unsigned int order, bool init)
118 {
119 	if (kasan_enabled())
120 		__kasan_poison_pages(page, order, init);
121 }
122 
123 void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
124 static __always_inline void kasan_unpoison_pages(struct page *page,
125 						 unsigned int order, bool init)
126 {
127 	if (kasan_enabled())
128 		__kasan_unpoison_pages(page, order, init);
129 }
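
/*
 * Pairing sketch (illustrative): the page allocator is expected to
 * unpoison pages when handing them out and poison them when taking
 * them back, e.g.:
 *
 *	// on allocation
 *	kasan_unpoison_pages(page, order, want_init_on_alloc(gfp_flags));
 *	// on free
 *	kasan_poison_pages(page, order, want_init_on_free());
 */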

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
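
/*
 * Return-value sketch (illustrative): a true return means KASAN has
 * taken over the object (e.g. put it in the generic-mode quarantine),
 * so a slab free path would stop instead of freeing it immediately:
 *
 *	if (kasan_slab_free(s, object, init))
 *		return;		// object is quarantined, not freed now
 */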

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
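
/*
 * Usage sketch (illustrative, modeled on a ksize()-style helper): probe
 * one byte before touching an object whose validity is not guaranteed;
 * on failure a KASAN report has already been printed:
 *
 *	if (unlikely(!kasan_check_byte(ptr)))
 *		return 0;
 */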

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

size_t kasan_metadata_size(struct kmem_cache *cache);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
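
/*
 * Usage sketch (illustrative): subsystems that run callbacks later can
 * record where a callback was queued, so that a subsequent KASAN report
 * can show that stack trace as auxiliary information, e.g.:
 *
 *	kasan_record_aux_stack_noalloc(work);	// when queuing a work item
 */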

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
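
/*
 * Usage sketch (illustrative): strip the tag bits when comparing or
 * doing arithmetic on pointers that may carry different tags for the
 * same underlying object:
 *
 *	if (kasan_reset_tag(p) == kasan_reset_tag(q))
 *		...	// p and q refer to the same untagged address
 */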

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);
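
/*
 * Usage sketch (illustrative; the surrounding calls are assumptions):
 * an architecture's module loader would allocate the shadow right after
 * allocating the module mapping, and back out on failure, e.g.:
 *
 *	p = module_alloc(size);
 *	if (p && kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 */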

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */