/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

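/*
 * Usage sketch (illustrative, an assumption about how callers such as
 * __vmalloc_node_range() combine these flags, not a verbatim quote of
 * mm/vmalloc.c):
 *
 *	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_VM_ALLOC;
 *
 *	if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
 *		kasan_flags |= KASAN_VMALLOC_INIT;
 *	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 *		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
 *	addr = kasan_unpoison_vmalloc(addr, real_size, kasan_flags);
 */
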
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif

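/*
 * Example (illustrative): with generic KASAN, KASAN_SHADOW_SCALE_SHIFT is 3,
 * so one shadow byte tracks an 8-byte granule and the mapping above reduces
 * to:
 *
 *	shadow = (void *)(((unsigned long)addr >> 3) + KASAN_SHADOW_OFFSET);
 */
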
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

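/*
 * Usage sketch (illustrative): bracket an access that is expected to touch
 * poisoned memory, e.g. when dumping raw object contents for debugging,
 * so that it is not reported:
 *
 *	kasan_disable_current();
 *	memcpy(buf, object, size);
 *	kasan_enable_current();
 */
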
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

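/*
 * Pairing sketch (illustrative, an assumption about the page allocator's
 * hook points in mm/page_alloc.c): pages are unpoisoned on the allocation
 * path and poisoned again on the free path:
 *
 *	post_alloc_hook():	kasan_unpoison_pages(page, order, init);
 *	free_pages_prepare():	kasan_poison_pages(page, order, init);
 */
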
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

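/*
 * Usage sketch (illustrative): a true return value means KASAN has taken
 * ownership of the object (e.g. put it into the generic-mode quarantine),
 * so the slab allocator must not release the memory yet:
 *
 *	if (kasan_slab_free(s, object, init))
 *		return;
 *	...actually free the object...
 */
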
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

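/*
 * Usage sketch (illustrative, modelled on how ksize() is expected to
 * validate its argument): probe a single byte, triggering a KASAN report
 * on failure, before trusting the pointer:
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
 *		return 0;
 */
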
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

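/*
 * Layout sketch (illustrative, an assumption about how generic KASAN
 * places its per-object metadata): kasan_cache_create() may grow the
 * object and record where each piece of metadata lives relative to the
 * object start:
 *
 *	[ user-visible payload             ]
 *	[ alloc meta at alloc_meta_offset  ]
 *	[ free meta at free_meta_offset    ] (may reuse the freed payload
 *					      itself when it fits)
 */
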
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

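/*
 * Example (illustrative): the tag-based modes keep a tag in the top byte
 * of the pointer, so strip it before comparisons or arithmetic that must
 * operate on the raw address:
 *
 *	if (kasan_reset_tag(a) == kasan_reset_tag(b))
 *		...a and b refer to the same underlying address...
 */
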
/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */