/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
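
/*
 * These are single-bit flags that callers may OR together. As an
 * illustrative sketch (the exact combination depends on the caller's
 * allocation parameters), a vmalloc() path requesting a VM_ALLOC-style
 * mapping with zero-initialized memory could build:
 *
 *	kasan_vmalloc_flags_t kasan_flags =
 *		KASAN_VMALLOC_VM_ALLOC | KASAN_VMALLOC_INIT;
 */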

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif
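
/*
 * Early (zero) shadow: a single statically allocated page, plus page-table
 * entries at each level that point at it, is mapped over shadow ranges that
 * do not yet have real shadow memory, so the covered memory reads as
 * unpoisoned during early boot.
 */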

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
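
/*
 * Worked example (illustrative; the constants are per-architecture): with
 * KASAN_SHADOW_SCALE_SHIFT == 3, as used by generic KASAN, each shadow byte
 * covers an 8-byte granule of memory:
 *
 *	shadow(addr)     = (addr >> 3) + KASAN_SHADOW_OFFSET
 *	shadow(addr + 8) = shadow(addr) + 1
 *
 * so the shadow region is 1/8th the size of the address range it describes.
 */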

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

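/*
 * Hardware tag-based KASAN can initialize memory as a side effect of setting
 * memory tags, so when this returns true callers may skip a separate zeroing
 * of the allocation and pass the init request down to the KASAN hooks
 * instead.
 */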
static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}
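
/*
 * All of the kasan_*() hooks in this block follow the pattern of
 * kasan_unpoison_range() above: the inline wrapper checks kasan_enabled()
 * (a static key with CONFIG_KASAN_HW_TAGS, a compile-time constant for the
 * other modes) and only then calls the out-of-line __kasan_*()
 * implementation. Callers should use the wrappers, not the
 * double-underscore variants.
 */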

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
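
/*
 * A true return value means KASAN has taken over the object (for example,
 * generic KASAN may place it in quarantine) or has detected an invalid free;
 * either way the caller must not free it itself. Caller sketch (illustrative;
 * do_actual_free() is a made-up name):
 *
 *	if (kasan_slab_free(s, object, init))
 *		return;		// object must not be reused or freed
 *	do_actual_free(s, object);
 */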

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
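
/*
 * kasan_check_byte() returns false (after printing a report) if the byte at
 * @addr is not accessible. Usage sketch (illustrative; callers such as
 * ksize() may differ in detail):
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;	// bad pointer, report already printed
 */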

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
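
/*
 * Under the tag-based modes the top byte of a pointer carries a tag, so two
 * pointers to the same object may compare unequal. Illustrative sketch of an
 * address comparison that ignores tags:
 *
 *	if (kasan_reset_tag(p) == kasan_reset_tag(q))
 *		;	// p and q address the same memory
 */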

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}
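
/*
 * Note that kasan_unpoison_vmalloc() returns a pointer: under the tag-based
 * modes it may assign a new tag, so callers must use the return value rather
 * than the pointer they passed in (illustrative sketch):
 *
 *	area->addr = kasan_unpoison_vmalloc(area->addr, size,
 *					    KASAN_VMALLOC_PROT_NORMAL);
 */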

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);
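
/*
 * Calling-convention sketch (illustrative; the module loader's actual code
 * differs): allocate shadow right after the module mapping is created and
 * treat a non-zero return as an allocation failure:
 *
 *	if (kasan_alloc_module_shadow(addr, size, GFP_KERNEL))
 *		goto free_module_mapping;	// hypothetical label
 */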

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */