// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot; they are stored one after
 * another within contiguous memory allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - \
		STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
		u32 extra : STACK_DEPOT_EXTRA_BITS;
	};
};
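
/*
 * Decoding sketch (illustrative only, not used by the code below): a handle
 * splits its 32 bits into a slab index, a 16-byte-granular offset within that
 * slab, one always-set "valid" bit (so a valid handle never compares equal to
 * 0), and STACK_DEPOT_EXTRA_BITS of caller-owned flags. Recovering the record
 * address amounts to:
 *
 *	union handle_parts parts = { .handle = handle };
 *	void *record = stack_slabs[parts.slabindex] +
 *		       (parts.offset << STACK_ALLOC_ALIGN);
 */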

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
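
/*
 * Usage sketch (hypothetical caller; MY_EXTRA is an invented flag value):
 * the extra bits ride along in the handle itself, so they can be read back
 * without touching the depot:
 *
 *	depot_stack_handle_t h;
 *
 *	h = __stack_depot_save(entries, nr, MY_EXTRA, GFP_NOWAIT, true);
 *	if (stack_depot_get_extra_bits(h) == MY_EXTRA)
 *		...;
 */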

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release() pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}
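
/*
 * Caller-side sketch of the preallocation hand-off (this mirrors what
 * __stack_depot_save() below actually does; error handling elided): the
 * slab-sized page is allocated outside the raw spinlock, offered to
 * init_stack_slab() under the lock, and freed afterwards if nobody
 * consumed it:
 *
 *	prealloc = page_address(alloc_pages(gfp, STACK_ALLOC_ORDER));
 *	raw_spin_lock_irqsave(&depot_lock, flags);
 *	init_stack_slab(&prealloc);
 *	raw_spin_unlock_irqrestore(&depot_lock, flags);
 *	if (prealloc)
 *		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
 */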

/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}
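
/*
 * Sizing sketch (illustrative numbers, assuming 64-bit longs): a 16-frame
 * stack occupies struct_size(stack, entries, 16), i.e. the fixed header plus
 * 16 * 8 bytes of entries, rounded up by ALIGN() to a multiple of
 * 1 << STACK_ALLOC_ALIGN (16 bytes). That rounding is what lets
 * handle.offset store the record's position right-shifted by
 * STACK_ALLOC_ALIGN without losing information.
 */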

/* one hash table bucket entry per 16kB of memory */
#define STACK_HASH_SCALE	14
/* limited between 4k and 1M buckets */
#define STACK_HASH_ORDER_MIN	12
#define STACK_HASH_ORDER_MAX	20
#define STACK_HASH_SEED 0x9747b28c

static unsigned int stack_hash_order;
static unsigned int stack_hash_mask;

static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);

void __init stack_depot_want_early_init(void)
{
	/* Too late to request early init now */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_want_early_init = true;
}

int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This is supposed to be called only once, from mm_init() */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;

	__stack_depot_early_init_passed = true;

	if (kasan_enabled() && !stack_hash_order)
		stack_hash_order = STACK_HASH_ORDER_MAX;

	if (!__stack_depot_want_early_init || stack_depot_disable)
		return 0;

	if (stack_hash_order)
		entries = 1UL << stack_hash_order;
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_HASH_ORDER_MIN,
						1UL << STACK_HASH_ORDER_MAX);

	if (!stack_table) {
		pr_err("Stack Depot hash table allocation failed, disabling\n");
		stack_depot_disable = true;
		return -ENOMEM;
	}

	return 0;
}

int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);
	if (!stack_depot_disable && !stack_table) {
		unsigned long entries;
		int scale = STACK_HASH_SCALE;

		if (stack_hash_order) {
			entries = 1UL << stack_hash_order;
		} else {
			entries = nr_free_buffer_pages();
			entries = roundup_pow_of_two(entries);

			if (scale > PAGE_SHIFT)
				entries >>= (scale - PAGE_SHIFT);
			else
				entries <<= (PAGE_SHIFT - scale);
		}

		if (entries < 1UL << STACK_HASH_ORDER_MIN)
			entries = 1UL << STACK_HASH_ORDER_MIN;
		if (entries > 1UL << STACK_HASH_ORDER_MAX)
			entries = 1UL << STACK_HASH_ORDER_MAX;

		pr_info("Stack Depot allocating hash table of %lu entries with kvcalloc\n",
				entries);
		stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
		if (!stack_table) {
			pr_err("Stack Depot hash table allocation failed, disabling\n");
			stack_depot_disable = true;
			ret = -ENOMEM;
		}
		stack_hash_mask = entries - 1;
	}
	mutex_unlock(&stack_depot_init_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
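
/*
 * Usage sketch (hypothetical subsystem, names invented): users that did not
 * request early init are expected to call stack_depot_init() once before
 * their first stack_depot_save():
 *
 *	static int __init my_debug_feature_init(void)
 *	{
 *		int err = stack_depot_init();
 *
 *		if (err)
 *			return err;
 *		...
 *	}
 */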

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_snprint - print stack entries from a depot into a buffer
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @spaces:	Number of leading spaces to print
 *
 * Return:	Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
		       int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
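
/*
 * Usage sketch (hypothetical caller; the buffer size is arbitrary):
 *
 *	char buf[512];
 *	int len = stack_depot_snprint(handle, buf, sizeof(buf), 2);
 */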

/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:		Stack depot handle which was returned from
 *			stack_depot_save().
 */
void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this stack record.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (!handle)
		return 0;

	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
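
/*
 * Usage sketch (hypothetical caller): the returned pointer refers to the
 * depot's own storage, so the entries must not be modified or freed:
 *
 *	unsigned long *entries;
 *	unsigned int nr = stack_depot_fetch(handle, &entries);
 *
 *	if (nr)
 *		stack_trace_print(entries, nr, 0);
 */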

/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack slabs (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
 * %true, the function is allowed to replenish the stack slab pool in case no
 * space is left (allocates using GFP flags of @alloc_flags). If @can_alloc is
 * %false, avoids any allocations and will fail if no space is left to store
 * the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Additional opaque flags can be passed in @extra_bits, stored in the unused
 * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
 * without calling stack_depot_fetch().
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					unsigned int extra_bits,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	union handle_parts retval = { .handle = 0 };
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stackdepot growth.
	 *
	 * Because use of filter_irq_stacks() is a requirement to ensure
	 * stackdepot can efficiently deduplicate interrupt stacks, always
	 * call filter_irq_stacks() here to simplify all callers' use of
	 * stackdepot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval.handle = found->handle.handle;
fast_exit:
	retval.extra = extra_bits;

	return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
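
/*
 * Usage sketch (hypothetical caller in a context where alloc_pages() must be
 * avoided, e.g. under a raw spinlock): pass @can_alloc as false and be
 * prepared for a 0 handle when the preallocated pool is exhausted:
 *
 *	handle = __stack_depot_save(entries, nr, 0, 0, false);
 *	if (!handle)
 *		...; // fall back or drop the trace
 */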

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
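
/*
 * End-to-end sketch (hypothetical caller): capture the current stack,
 * deduplicate it in the depot, and print it back later:
 *
 *	unsigned long entries[64];
 *	unsigned int nr;
 *	depot_stack_handle_t handle;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr, GFP_NOWAIT);
 *	if (handle)
 *		stack_depot_print(handle);
 */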
536