// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * the objects inside it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

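/* Append the object at @qlink to the tail of @q and account its size. */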
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

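/* Splice the whole contents of @from onto the tail of @to and empty @from. */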
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

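/*
 * A per-cpu queue is flushed into the global quarantine once it exceeds
 * 1 MB of objects. The global quarantine is an array of batches, so that
 * it can be drained and scanned one batch at a time.
 */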
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues, accessed with
 * interrupts disabled, and a global queue guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine doesn't support the memory shrinker with the SLAB
 * allocator, so we keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

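/* Look up the kmem_cache that a quarantined object belongs to. */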
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}

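/*
 * The quarantine link lives inside the object's free meta-data, so the
 * object's address is recovered by stepping back by free_meta_offset.
 */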
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

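/*
 * Return a quarantined object to its cache. With CONFIG_SLAB,
 * ___cache_free() is called with interrupts disabled.
 */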
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

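/*
 * Free every object on @q. If @cache is NULL, each object's cache is
 * looked up individually, which lets mixed batches from the global
 * quarantine be drained.
 */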
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

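/*
 * Put a freed object into quarantine instead of returning it to its cache.
 * When the per-cpu queue overflows, its contents are moved into a batch of
 * the global quarantine under quarantine_lock.
 */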
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	/*
	 * Note: irqs must stay disabled until after we move the batch to the
	 * global quarantine. Otherwise quarantine_remove_cache() can miss
	 * some objects belonging to the cache if they are in our local temp
	 * list. quarantine_remove_cache() executes on_each_cpu() at the
	 * beginning, which ensures that it either sees the objects in the
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);
}

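/*
 * Shrink the global quarantine: recompute the size limits (the amount of
 * installed memory may have changed since the last call) and, if the
 * quarantine is still over its limit, free the oldest batch.
 */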
void quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * The srcu critical section ensures that quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us a
	 * private grace period domain that does not interfere with anything
	 * else, and (2) it allows synchronize_srcu() to return without
	 * waiting if there are no pending read critical sections (which is
	 * the expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of the batch slots in the global quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

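/*
 * Move all objects belonging to @cache from @from to @to; objects from
 * other caches are kept on @from.
 */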
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

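/* on_each_cpu() callback: flush this cpu's quarantined objects of a cache. */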
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}

/* Free all quarantined objects belonging to cache. */
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * the per-cpu list to the global quarantine in quarantine_put(),
	 * nor objects being freed in quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning the whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}