xref: /openbmc/linux/mm/kasan/quarantine.c (revision 0f4b20ef)
// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list; it also tracks the total size of the
 * objects it holds. Per-cpu queues are additionally marked offline while
 * their CPU is being unplugged (see kasan_cpu_offline()).
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

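/*
 * Splice the whole of @from onto the tail of @to in O(1) and reinitialize
 * @from as an empty queue.
 */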
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE (1 << 20)
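/* Number of batches in the global queue: max(1024, 4 * CONFIG_NR_CPUS). */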
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues, each accessed only by
 * its owning CPU, and a global queue guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine does not support the memory shrinker when the SLAB
 * allocator is used, so the ratio is kept low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

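/*
 * A quarantine link points into the slab that holds its object, so the
 * owning cache can be recovered through virt_to_slab().
 */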
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

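/*
 * The quarantine link is embedded in the object's free metadata; walk back
 * from the metadata to the beginning of the object.
 */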
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

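/*
 * Release one quarantined object back to the slab allocator. With SLAB,
 * ___cache_free() must be called with interrupts disabled, hence the
 * local_irq_save()/local_irq_restore() pair around it.
 */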
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(meta, sizeof(*meta));

	/*
	 * As the object now gets freed from the quarantine, assume that its
	 * free track is no longer valid.
	 */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

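/*
 * Free every object in @q. If @cache is NULL, each object's cache is looked
 * up from its slab instead.
 */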
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

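/*
 * Called from the slab free path. Returns true if the object was parked in
 * the quarantine; the caller must then skip the immediate free. A sketch of
 * the calling pattern (simplified, not the exact slab code):
 *
 *	if (kasan_quarantine_put(cache, object))
 *		return;	// freed later, e.g. by kasan_quarantine_reduce()
 *	// otherwise proceed with the immediate free
 */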
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			/* Start a new batch, unless the ring of batches is full. */
			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}

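/*
 * Shrink the global quarantine below its limit by releasing the oldest
 * batch. Also refreshes quarantine_max_size and quarantine_batch_size to
 * track the current amount of RAM and the number of online CPUs.
 */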
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * An srcu critical section ensures that kasan_quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us a private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update the quarantine limits in case of memory hotplug: allow the
	 * quarantine a fraction of the installed memory, minus the per-cpu
	 * queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most half of the batch slots in the quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

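/*
 * Move every object belonging to @cache from @from to @to; all other
 * objects are re-queued onto @from in their original order.
 */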
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

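/*
 * on_each_cpu() callback: runs on every CPU with interrupts disabled and
 * drains the local per-cpu queue of objects belonging to the cache being
 * destroyed.
 */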
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful not to miss any objects that are being moved from
	 * a per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning the whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

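/*
 * CPU hotplug callbacks: when a CPU goes down, its queue is drained and
 * marked offline so that kasan_quarantine_put() stops feeding it; when the
 * CPU comes back online, the queue is marked usable again.
 */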
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the write to q->offline and
	 * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted by
	 * an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

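/*
 * Register the hotplug callbacks at boot so that per-cpu queues are drained
 * when CPUs are unplugged.
 */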
static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);