xref: /openbmc/linux/mm/kasan/kasan.h (revision 3df83c91)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h>

#ifdef CONFIG_KASAN_HW_TAGS
#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
static inline bool kasan_stack_collection_enabled(void)
{
	return static_branch_unlikely(&kasan_flag_stacktrace);
}
#else
static inline bool kasan_stack_collection_enabled(void)
{
	return true;
}
#endif

extern bool kasan_flag_panic __ro_after_init;

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
#else
#include <asm/mte-kasan.h>
#define KASAN_GRANULE_SIZE	MTE_GRANULE_SIZE
#endif

#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)
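
/*
 * Illustrative sketch, not part of this header's API: for the generic and
 * SW_TAGS modes KASAN_SHADOW_SCALE_SHIFT is 3, so a granule is 8 bytes and
 * one page of shadow memory covers KASAN_MEMORY_PER_SHADOW_PAGE ==
 * 8 << PAGE_SHIFT bytes (32 KB with 4 KB pages). A granule-aligned address
 * satisfies:
 *
 *	((unsigned long)addr & KASAN_GRANULE_MASK) == 0
 */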

#define KASAN_TAG_KERNEL	0xFF /* native kernel pointers tag */
#define KASAN_TAG_INVALID	0xFE /* inaccessible memory tag */
#define KASAN_TAG_MAX		0xFD /* maximum value for random tags */

#ifdef CONFIG_KASAN_HW_TAGS
#define KASAN_TAG_MIN		0xF0 /* minimum value for random tags */
#else
#define KASAN_TAG_MIN		0x00 /* minimum value for random tags */
#endif
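
/*
 * Worked example based on the values above: HW_TAGS uses 4-bit MTE tags, so
 * random tags fall in [KASAN_TAG_MIN, KASAN_TAG_MAX] == [0xF0, 0xFD], with
 * 0xFE and 0xFF reserved for KASAN_TAG_INVALID and KASAN_TAG_KERNEL. For
 * SW_TAGS the random range is [0x00, 0xFD], e.g.:
 *
 *	u8 tag = kasan_random_tag();	(always KASAN_TAG_MIN <= tag <= KASAN_TAG_MAX)
 */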

#ifdef CONFIG_KASAN_GENERIC
#define KASAN_FREE_PAGE         0xFF  /* page was freed */
#define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
#define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
#define KASAN_KMALLOC_FREETRACK 0xFA  /* object was freed and has free track set */
#else
#define KASAN_FREE_PAGE         KASAN_TAG_INVALID
#define KASAN_PAGE_REDZONE      KASAN_TAG_INVALID
#define KASAN_KMALLOC_REDZONE   KASAN_TAG_INVALID
#define KASAN_KMALLOC_FREE      KASAN_TAG_INVALID
#define KASAN_KMALLOC_FREETRACK KASAN_TAG_INVALID
#endif

#define KASAN_GLOBAL_REDZONE    0xF9  /* redzone for global variable */
#define KASAN_VMALLOC_INVALID   0xF8  /* unallocated space in vmapped page */

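/*
 * Background sketch, not code from this file: in the generic mode a shadow
 * byte of 0 means the whole 8-byte granule is accessible, a value of 1..7
 * means only that many leading bytes are accessible, and negative values
 * (such as the KASAN_* markers in this file) describe why the granule is
 * inaccessible so that reports can name the bug type.
 */
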
/*
 * Stack redzone shadow values
 * (These are the compiler's ABI, don't change them)
 */
#define KASAN_STACK_LEFT        0xF1
#define KASAN_STACK_MID         0xF2
#define KASAN_STACK_RIGHT       0xF3
#define KASAN_STACK_PARTIAL     0xF4

/*
 * alloca redzone shadow values
 */
#define KASAN_ALLOCA_LEFT	0xCA
#define KASAN_ALLOCA_RIGHT	0xCB

#define KASAN_ALLOCA_REDZONE_SIZE	32

/*
 * Stack frame marker (compiler ABI).
 */
#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3

/* Don't break randconfig/all*config builds */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
#endif

/* Metadata layout customization. */
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2
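
/*
 * Worked example from the macros above: one report row describes
 * META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK == 16 metadata bytes, which
 * corresponds to META_MEM_BYTES_PER_ROW == 16 * KASAN_GRANULE_SIZE bytes of
 * memory (128 bytes in the generic mode), and META_ROWS_AROUND_ADDR rows
 * are printed around the faulting address.
 */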

struct kasan_access_info {
	const void *access_addr;
	const void *first_bad_addr;
	size_t access_size;
	bool is_write;
	unsigned long ip;
};

/* The layout of this struct is dictated by the compiler. */
struct kasan_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* The layout of this struct is dictated by the compiler. */
struct kasan_global {
	const void *beg;		/* Address of the beginning of the global variable. */
	size_t size;			/* Size of the global variable. */
	size_t size_with_redzone;	/* Size of the variable + size of the red zone, 32-byte aligned. */
	const void *name;
	const void *module_name;	/* Name of the module where the global variable is declared. */
	unsigned long has_dynamic_init;	/* This is needed for C++. */
#if KASAN_ABI_VERSION >= 4
	struct kasan_source_location *location;
#endif
#if KASAN_ABI_VERSION >= 5
	char *odr_indicator;
#endif
};

/* Structures to keep alloc and free tracks. */

#define KASAN_STACK_DEPTH 64

struct kasan_track {
	u32 pid;
	depot_stack_handle_t stack;
};

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
#define KASAN_NR_FREE_STACKS 5
#else
#define KASAN_NR_FREE_STACKS 1
#endif

struct kasan_alloc_meta {
	struct kasan_track alloc_track;
#ifdef CONFIG_KASAN_GENERIC
	/*
	 * call_rcu() call stack is stored into struct kasan_alloc_meta.
	 * The free stack is stored into struct kasan_free_meta.
	 */
	depot_stack_handle_t aux_stack[2];
#else
	struct kasan_track free_track[KASAN_NR_FREE_STACKS];
#endif
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
	u8 free_track_idx;
#endif
};
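
/*
 * Usage sketch (an assumption about callers, not a definition from this
 * file): in the generic mode the aux_stack[] slots are typically filled via
 * kasan_record_aux_stack(), e.g. when an object is handed to call_rcu() or
 * queued as a work item, so reports can show those intermediate call stacks.
 */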

struct qlist_node {
	struct qlist_node *next;
};

/*
 * Generic mode either stores free meta in the object itself or in the redzone
 * after the object. In the former case free meta offset is 0, in the latter
 * case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
 * offset when free meta isn't present.
 */
#define KASAN_NO_FREE_META INT_MAX
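
/*
 * Usage sketch, assuming the cache layout described above: code that looks
 * up free meta is expected to treat KASAN_NO_FREE_META as "not present",
 * roughly:
 *
 *	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
 *		return NULL;
 *	return (void *)object + cache->kasan_info.free_meta_offset;
 */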

struct kasan_free_meta {
#ifdef CONFIG_KASAN_GENERIC
	/* This field is used while the object is in the quarantine.
	 * Otherwise it might be used for the allocator freelist.
	 */
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
#endif
};

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
						const void *object);
#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
						const void *object);
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
		<< KASAN_SHADOW_SCALE_SHIFT);
}
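
/*
 * Sketch for orientation, relying on kasan_mem_to_shadow() from
 * <linux/kasan.h>: kasan_shadow_to_mem() is its inverse, i.e. for a
 * granule-aligned pointer p the round trip holds:
 *
 *	kasan_shadow_to_mem(kasan_mem_to_shadow(p)) == p
 */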

static inline bool addr_has_metadata(const void *addr)
{
	return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}

/**
 * kasan_check_range - Check a memory region, and report if the access is invalid.
 * @addr: the accessed address
 * @size: the accessed size
 * @write: true if access is a write access
 * @ret_ip: return address
 * @return: true if access was valid, false if invalid
 */
bool kasan_check_range(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip);
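
/*
 * Illustrative call, an assumption about typical callers rather than code
 * from this file: instrumentation and the common mm hooks validate an access
 * roughly like this before touching memory:
 *
 *	if (!kasan_check_range((unsigned long)ptr, size, false, _RET_IP_))
 *		return false;
 */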

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline bool addr_has_metadata(const void *addr)
{
	return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void kasan_print_tags(u8 addr_tag, const void *addr);
#else
static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
#endif

void *kasan_find_first_bad_addr(void *addr, size_t size);
const char *kasan_get_bug_type(struct kasan_access_info *info);
void kasan_metadata_fetch_row(char *buffer, void *row);

#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
void kasan_print_address_stack_frame(const void *addr);
#else
static inline void kasan_print_address_stack_frame(const void *addr) { }
#endif

bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);

struct page *kasan_addr_to_page(const void *addr);

depot_stack_handle_t kasan_save_stack(gfp_t flags);
void kasan_set_track(struct kasan_track *track, gfp_t flags);
void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag);
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag);

#if defined(CONFIG_KASAN_GENERIC) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
#else
static inline bool kasan_quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void kasan_quarantine_reduce(void) { }
static inline void kasan_quarantine_remove_cache(struct kmem_cache *cache) { }
#endif

#ifndef arch_kasan_set_tag
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{
	return addr;
}
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr)	0
#endif

#define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
#define get_tag(addr)		arch_kasan_get_tag(addr)
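
/*
 * Worked example, assuming an arch that stores the tag in the top byte of
 * the pointer (as arm64 does): set_tag()/get_tag() let the tag travel with
 * the pointer without touching the memory itself, e.g.:
 *
 *	void *tagged = set_tag(ptr, 0xAB);
 *	u8 tag = get_tag(tagged);	(tag == 0xAB)
 */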

#ifdef CONFIG_KASAN_HW_TAGS

#ifndef arch_enable_tagging
#define arch_enable_tagging()
#endif
#ifndef arch_init_tags
#define arch_init_tags(max_tag)
#endif
#ifndef arch_set_tagging_report_once
#define arch_set_tagging_report_once(state)
#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag()	(0xFF)
#endif
#ifndef arch_get_mem_tag
#define arch_get_mem_tag(addr)	(0xFF)
#endif
#ifndef arch_set_mem_tag_range
#define arch_set_mem_tag_range(addr, size, tag) ((void *)(addr))
#endif

#define hw_enable_tagging()			arch_enable_tagging()
#define hw_init_tags(max_tag)			arch_init_tags(max_tag)
#define hw_set_tagging_report_once(state)	arch_set_tagging_report_once(state)
#define hw_get_random_tag()			arch_get_random_tag()
#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag)	arch_set_mem_tag_range((addr), (size), (tag))

#else /* CONFIG_KASAN_HW_TAGS */

#define hw_enable_tagging()
#define hw_set_tagging_report_once(state)

#endif /* CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_set_tagging_report_once(bool state);
void kasan_enable_tagging(void);

#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_set_tagging_report_once(bool state) { }
static inline void kasan_enable_tagging(void) { }

#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

#ifdef CONFIG_KASAN_SW_TAGS
u8 kasan_random_tag(void);
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline u8 kasan_random_tag(void) { return hw_get_random_tag(); }
#else
static inline u8 kasan_random_tag(void) { return 0; }
#endif

#ifdef CONFIG_KASAN_HW_TAGS

static inline void kasan_poison(const void *addr, size_t size, u8 value)
{
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	hw_set_mem_tag_range((void *)addr, size, value);
}

static inline void kasan_unpoison(const void *addr, size_t size)
{
	u8 tag = get_tag(addr);

	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	size = round_up(size, KASAN_GRANULE_SIZE);

	hw_set_mem_tag_range((void *)addr, size, tag);
}

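/*
 * Worked example for the helper above, assuming MTE's 16-byte granules:
 * kasan_unpoison(ptr, 5) rounds the size up to KASAN_GRANULE_SIZE, so the
 * whole 16-byte granule is retagged with the tag taken from ptr. This is
 * why small overflows within the last granule are not caught in this mode.
 */
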
static inline bool kasan_byte_accessible(const void *addr)
{
	u8 ptr_tag = get_tag(addr);
	u8 mem_tag = hw_get_mem_tag((void *)addr);

	return (mem_tag != KASAN_TAG_INVALID) &&
		(ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag);
}

#else /* CONFIG_KASAN_HW_TAGS */

/**
 * kasan_poison - mark the memory range as inaccessible
 * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size - range size, must be aligned to KASAN_GRANULE_SIZE
 * @value - value that's written to metadata for the range
 *
 * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
 */
void kasan_poison(const void *addr, size_t size, u8 value);

/**
 * kasan_unpoison - mark the memory range as accessible
 * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size - range size, can be unaligned
 *
 * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
 * marking the range.
 * For the generic mode, the last granule of the memory range gets partially
 * unpoisoned based on the @size.
 */
void kasan_unpoison(const void *addr, size_t size);
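
/*
 * Usage sketch, an assumption about callers rather than calls made from this
 * header: redzones are typically poisoned with one of the KASAN_* markers and
 * objects are unpoisoned again on allocation, e.g.:
 *
 *	kasan_poison(redzone_start, round_up(redzone_size, KASAN_GRANULE_SIZE),
 *		     KASAN_KMALLOC_REDZONE);
 *	kasan_unpoison(object, object_size);
 */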

bool kasan_byte_accessible(const void *addr);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/**
 * kasan_poison_last_granule - mark the last granule of the memory range as
 * inaccessible
 * @address - range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size - range size
 *
 * This function is only available for the generic mode, as it's the only mode
 * that has partially poisoned memory granules.
 */
void kasan_poison_last_granule(const void *address, size_t size);
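
/*
 * Sketch of the shadow encoding this relies on (background assumption, not
 * code from this file): for an unaligned size, the shadow byte of the last
 * granule records how many leading bytes stay accessible, roughly:
 *
 *	*(u8 *)kasan_mem_to_shadow(addr + size) = size & KASAN_GRANULE_MASK;
 *
 * so size == 13 leaves the second shadow byte set to 5 with 8-byte granules.
 */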

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_poison_last_granule(const void *address, size_t size) { }

#endif /* CONFIG_KASAN_GENERIC */

/*
 * Exported functions for interfaces called from assembly or from generated
 * code. Declarations here to avoid warning about missing declarations.
 */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
void __asan_register_globals(struct kasan_global *globals, size_t size);
void __asan_unregister_globals(struct kasan_global *globals, size_t size);
void __asan_handle_no_return(void);
void __asan_alloca_poison(unsigned long addr, size_t size);
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);

void __asan_load1(unsigned long addr);
void __asan_store1(unsigned long addr);
void __asan_load2(unsigned long addr);
void __asan_store2(unsigned long addr);
void __asan_load4(unsigned long addr);
void __asan_store4(unsigned long addr);
void __asan_load8(unsigned long addr);
void __asan_store8(unsigned long addr);
void __asan_load16(unsigned long addr);
void __asan_store16(unsigned long addr);
void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

void __asan_load1_noabort(unsigned long addr);
void __asan_store1_noabort(unsigned long addr);
void __asan_load2_noabort(unsigned long addr);
void __asan_store2_noabort(unsigned long addr);
void __asan_load4_noabort(unsigned long addr);
void __asan_store4_noabort(unsigned long addr);
void __asan_load8_noabort(unsigned long addr);
void __asan_store8_noabort(unsigned long addr);
void __asan_load16_noabort(unsigned long addr);
void __asan_store16_noabort(unsigned long addr);
void __asan_loadN_noabort(unsigned long addr, size_t size);
void __asan_storeN_noabort(unsigned long addr, size_t size);

void __asan_report_load1_noabort(unsigned long addr);
void __asan_report_store1_noabort(unsigned long addr);
void __asan_report_load2_noabort(unsigned long addr);
void __asan_report_store2_noabort(unsigned long addr);
void __asan_report_load4_noabort(unsigned long addr);
void __asan_report_store4_noabort(unsigned long addr);
void __asan_report_load8_noabort(unsigned long addr);
void __asan_report_store8_noabort(unsigned long addr);
void __asan_report_load16_noabort(unsigned long addr);
void __asan_report_store16_noabort(unsigned long addr);
void __asan_report_load_n_noabort(unsigned long addr, size_t size);
void __asan_report_store_n_noabort(unsigned long addr, size_t size);

void __asan_set_shadow_00(const void *addr, size_t size);
void __asan_set_shadow_f1(const void *addr, size_t size);
void __asan_set_shadow_f2(const void *addr, size_t size);
void __asan_set_shadow_f3(const void *addr, size_t size);
void __asan_set_shadow_f5(const void *addr, size_t size);
void __asan_set_shadow_f8(const void *addr, size_t size);

void __hwasan_load1_noabort(unsigned long addr);
void __hwasan_store1_noabort(unsigned long addr);
void __hwasan_load2_noabort(unsigned long addr);
void __hwasan_store2_noabort(unsigned long addr);
void __hwasan_load4_noabort(unsigned long addr);
void __hwasan_store4_noabort(unsigned long addr);
void __hwasan_load8_noabort(unsigned long addr);
void __hwasan_store8_noabort(unsigned long addr);
void __hwasan_load16_noabort(unsigned long addr);
void __hwasan_store16_noabort(unsigned long addr);
void __hwasan_loadN_noabort(unsigned long addr, size_t size);
void __hwasan_storeN_noabort(unsigned long addr, size_t size);

void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);

#endif /* __MM_KASAN_KASAN_H */