xref: /openbmc/linux/lib/stackdepot.c (revision 35267cea)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously,
 * one after another, in fixed-size memory allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
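
/*
 * Typical usage (an illustrative sketch, not part of the original file; the
 * caller is hypothetical, stack_trace_save() is the generic helper from
 * <linux/stacktrace.h>, and the depot API is defined below). Saving an
 * identical trace a second time returns the same handle, which is where the
 * memory savings come from:
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */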

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* Compact structure that stores a reference to a stack. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
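
/*
 * Illustrative sketch (not part of the original file): a non-zero handle is
 * decoded back into a record address by undoing the packing above, which is
 * what stack_depot_fetch() does:
 *
 *	union handle_parts parts = { .handle = handle };
 *	struct stack_record *stack = stack_slabs[parts.slabindex] +
 *				     (parts.offset << STACK_ALLOC_ALIGN);
 *
 * The "valid" bit keeps even the record at slab 0, offset 0 from encoding to
 * handle 0, so a zero handle can always mean "nothing stored".
 */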

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);
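
/*
 * Usage note (an illustrative addition, not part of the original file):
 * booting with "stack_depot_disable=on" (or "=1") on the kernel command line
 * skips the hash table allocation in stack_depot_init() below, and
 * stack_depot_save() then returns 0 without storing anything.
 */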

int __init stack_depot_init(void)
{
	if (!stack_depot_disable) {
		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
		int i;

		stack_table = memblock_alloc(size, size);
		if (!stack_table) {
			/* Do not dereference a NULL table if the allocation failed. */
			pr_err("Stack Depot hash table allocation failed, disabling\n");
			stack_depot_disable = true;
			return -ENOMEM;
		}
		for (i = 0; i < STACK_HASH_SIZE; i++)
			stack_table[i] = NULL;
	}
	return 0;
}

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this stack.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
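
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * turning a handle back into a printable trace:
 *
 *	unsigned long *saved_entries;
 *	unsigned int saved_nr;
 *
 *	saved_nr = stack_depot_fetch(handle, &saved_entries);
 *	if (saved_nr)
 *		stack_trace_print(saved_entries, saved_nr, 0);
 */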

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot, or 0 on failure
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be initialized.
	 * If so, allocate the memory - we won't be able to do that under the
	 * lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
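
/*
 * Usage note (an illustrative addition, not part of the original file): the
 * only potentially sleeping step in stack_depot_save() is the slab
 * preallocation above, which uses the caller's gfp mask filtered down to the
 * GFP_ATOMIC/GFP_KERNEL bits plus __GFP_NOWARN. Callers in atomic context
 * therefore pass a non-sleeping mask, e.g.:
 *
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */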

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
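
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * trimming everything below the IRQ entry point before saving makes traces
 * that differ only in their interrupted context deduplicate to one record:
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */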
367