xref: /openbmc/linux/lib/stackdepot.c (revision f17f06a0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * later be presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously one
 * after another in a contiguous memory allocation. A short usage sketch
 * follows this comment.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
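
/*
 * Illustrative usage sketch (an assumption about how a typical caller uses
 * this API, not code from this file): a debugging tool captures the current
 * stack with stack_trace_save() and stores it in the depot. Saving an
 * identical trace again returns the same handle, which is where the space
 * savings come from.
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 *	(saving the same trace later yields this same handle)
 */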

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
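
/*
 * Worked example of the handle bit budget (an illustration only, assuming
 * 4 KB pages, i.e. PAGE_SHIFT == 12): offsets are stored in 16-byte units
 * (1 << STACK_ALLOC_ALIGN), so STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4 = 10,
 * which exactly covers one 16 KB slab. With a 32-bit handle and one valid
 * bit, STACK_ALLOC_INDEX_BITS = 32 - 1 - 10 = 21, far more than the 8192
 * slab cap, so the depot can address at most 8192 * 16 KB = 128 MB of
 * stack storage.
 */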

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[1];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release() pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	int required_size = offsetof(struct stack_record, entries) +
		sizeof(unsigned long) * size;
	struct stack_record *stack;

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, size * sizeof(unsigned long));
	depot_offset += required_size;

	return stack;
}
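
/*
 * Sizing illustration (an assumption based on a typical 64-bit layout, not
 * something this file asserts): offsetof(struct stack_record, entries) is
 * 24 bytes, so a 16-frame trace needs 24 + 16 * 8 = 152 bytes, which ALIGN()
 * rounds up to 160 bytes (the next multiple of 1 << STACK_ALLOC_ALIGN = 16),
 * keeping every record 16-byte aligned within its slab.
 */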

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
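
/*
 * With STACK_HASH_ORDER 20 the table below has 1 << 20 = 1,048,576 buckets;
 * at one pointer per bucket that is 8 MiB of static storage on a 64-bit
 * kernel (4 MiB on 32-bit). The table is only the index: the stack records
 * themselves live in the slabs above.
 */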

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ...	STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
			       size * sizeof(unsigned long) / sizeof(u32),
			       STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this stack.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
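
/*
 * Illustrative fetch example (an assumption about typical usage, not code
 * from this file): given a handle previously returned by stack_depot_save(),
 * the saved trace can be looked up and printed with stack_trace_print().
 *
 *	unsigned long *entries;
 *	unsigned int nr_entries;
 *
 *	nr_entries = stack_depot_fetch(handle, &entries);
 *	stack_trace_print(entries, nr_entries, 0);
 */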

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot, or 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);