xref: /openbmc/linux/kernel/bpf/local_storage.c (revision e657c18a)
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>

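/*
 * Per-cpu pointers to the storage in use by the program currently
 * executing, one slot per storage type. They are set up right before a
 * cgroup bpf program runs (see bpf_cgroup_storage_set()), so that the
 * bpf_get_local_storage() helper can find the storage for the current
 * (cgroup, attach_type) pair without a lookup.
 */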
DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#ifdef CONFIG_CGROUP_BPF

#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

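/*
 * A cgroup storage map. The rbtree (->root) indexes the attached
 * storages by (cgroup_inode_id, attach_type) key, and ->list chains
 * them for get_next_key iteration; both are protected by ->lock.
 * ->prog is the single program this map has been assigned to.
 */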
struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct bpf_prog *prog;
	struct rb_root root;
	struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}

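/* Keys are ordered by cgroup inode id first, attach type second. */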
static int bpf_cgroup_storage_key_cmp(
	const struct bpf_cgroup_storage_key *key1,
	const struct bpf_cgroup_storage_key *key2)
{
	if (key1->cgroup_inode_id < key2->cgroup_inode_id)
		return -1;
	else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
		return 1;
	else if (key1->attach_type < key2->attach_type)
		return -1;
	else if (key1->attach_type > key2->attach_type)
		return 1;
	return 0;
}

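/*
 * Find the storage for @key in the rbtree. @locked tells us whether the
 * caller already holds map->lock (cgroup_storage_get_next_key() does),
 * in which case it is not taken (or released) here.
 */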
static struct bpf_cgroup_storage *cgroup_storage_lookup(
	struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
	bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}

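/*
 * Link @storage into the rbtree, or return -EEXIST if a storage with
 * the same key is already there. The caller must hold map->lock.
 */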
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}

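/*
 * Syscall-path lookup, returning a pointer to the shared buffer's data.
 * READ_ONCE() pairs with the xchg() in cgroup_storage_update_elem(),
 * which replaces the whole buffer and frees the old one only after an
 * RCU grace period.
 */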
static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}

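/*
 * Syscall-path update. BPF_NOEXIST is rejected because elements are
 * created and destroyed with the cgroup attachment, never through the
 * map syscalls. With BPF_F_LOCK the value is copied in place under the
 * map value's spin lock; otherwise a new buffer atomically replaces the
 * old one via xchg().
 */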
static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST)))
		return -EINVAL;

	if (unlikely(flags & BPF_NOEXIST))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
			   map->value_size,
			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
			   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_lock(map, new->data);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}

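/*
 * Copy the per-cpu values out into @value: num_possible_cpus() chunks
 * of round_up(value_size, 8) bytes each.
 */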
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

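/*
 * Write the per-cpu values from @value (same layout as for the copy
 * above). Only BPF_ANY and BPF_EXIST are accepted, since an element
 * can not be created through this path.
 */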
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

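/*
 * Iteration follows map->list. Reaching the end of the list, not a NULL
 * entry, is what signals that there is no next key.
 */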
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage_key *next = _next_key;
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		/* list_next_entry() never returns NULL, so check for the
		 * end of the list instead.
		 */
		if (list_is_last(&storage->list, &map->list))
			goto enoent;
		storage = list_next_entry(storage, list);
	} else {
		storage = list_first_entry(&map->list,
					 struct bpf_cgroup_storage, list);
	}

	spin_unlock_bh(&map->lock);
	next->attach_type = storage->key.attach_type;
	next->cgroup_inode_id = storage->key.cgroup_inode_id;
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}

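/*
 * Map creation. max_entries is enforced to be 0 because the number of
 * elements is given by the number of (cgroup, attach_type) pairs the
 * assigned program ends up attached to, which is unknown at creation
 * time.
 */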
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
			   __GFP_ZERO | GFP_USER, numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map),
				  PAGE_SIZE) >> PAGE_SHIFT;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}

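/*
 * By the time the map is freed its program has released it and every
 * storage has been unlinked, hence the tree and the list must be empty.
 */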
static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}

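/*
 * Elements can not be deleted through the map syscalls either; a
 * storage lives exactly as long as its cgroup attachment.
 */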
static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	struct btf_member *m;
	u32 offset, size;

	/* Key is expected to be of struct bpf_cgroup_storage_key type,
	 * which is:
	 * struct bpf_cgroup_storage_key {
	 *	__u64	cgroup_inode_id;
	 *	__u32	attach_type;
	 * };
	 */

	/*
	 * Key_type must be a structure with two fields.
	 */
	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
	    BTF_INFO_VLEN(key_type->info) != 2)
		return -EINVAL;

	/*
	 * The first field must be a 64 bit integer at 0 offset.
	 */
	m = (struct btf_member *)(key_type + 1);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
	if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
		return -EINVAL;

	/*
	 * The second field must be a 32 bit integer at 64 bit offset.
	 */
	m++;
	offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
	if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
		return -EINVAL;

	return 0;
}

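/*
 * Pretty-print one element via BTF: the shared variant as a single
 * value, the per-cpu variant as one value per possible cpu.
 */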
static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}

const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
};

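/*
 * Bind a map to a program at load time. A cgroup storage map may only
 * be used by one program, and a program may only use one map per
 * storage type; anything else is -EBUSY.
 */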
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	int ret = -EBUSY;

	spin_lock_bh(&map->lock);

	if (map->prog && map->prog != prog)
		goto unlock;
	if (prog->aux->cgroup_storage[stype] &&
	    prog->aux->cgroup_storage[stype] != _map)
		goto unlock;

	map->prog = prog;
	prog->aux->cgroup_storage[stype] = _map;
	ret = 0;
unlock:
	spin_unlock_bh(&map->lock);

	return ret;
}

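/* Undo bpf_cgroup_storage_assign(); runs when the program is freed. */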
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	spin_lock_bh(&map->lock);
	if (map->prog == prog) {
		WARN_ON(prog->aux->cgroup_storage[stype] != _map);
		map->prog = NULL;
		prog->aux->cgroup_storage[stype] = NULL;
	}
	spin_unlock_bh(&map->lock);
}

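/*
 * Compute the allocation size of one storage and, via @pages, how many
 * pages to charge to the memlock limit: the shared variant adds the
 * struct bpf_storage_buffer header, the per-cpu variant multiplies the
 * 8-byte-rounded value size by the number of possible cpus.
 */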
static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}

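/*
 * Allocate a storage for @prog's map of type @stype. Returns NULL when
 * the program does not use that storage type at all, or an ERR_PTR()
 * when charging or allocation fails.
 */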
469 
470 struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
471 					enum bpf_cgroup_storage_type stype)
472 {
473 	struct bpf_cgroup_storage *storage;
474 	struct bpf_map *map;
475 	gfp_t flags;
476 	size_t size;
477 	u32 pages;
478 
479 	map = prog->aux->cgroup_storage[stype];
480 	if (!map)
481 		return NULL;
482 
483 	size = bpf_cgroup_storage_calculate_size(map, &pages);
484 
485 	if (bpf_map_charge_memlock(map, pages))
486 		return ERR_PTR(-EPERM);
487 
488 	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
489 			       __GFP_ZERO | GFP_USER, map->numa_node);
490 	if (!storage)
491 		goto enomem;
492 
493 	flags = __GFP_ZERO | GFP_USER;
494 
495 	if (stype == BPF_CGROUP_STORAGE_SHARED) {
496 		storage->buf = kmalloc_node(size, flags, map->numa_node);
497 		if (!storage->buf)
498 			goto enomem;
499 		check_and_init_map_lock(map, storage->buf->data);
500 	} else {
501 		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
502 		if (!storage->percpu_buf)
503 			goto enomem;
504 	}
505 
506 	storage->map = (struct bpf_cgroup_storage_map *)map;
507 
508 	return storage;
509 
510 enomem:
511 	bpf_map_uncharge_memlock(map, pages);
512 	kfree(storage);
513 	return ERR_PTR(-ENOMEM);
514 }
515 
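/*
 * Freeing is deferred by one RCU grace period because lookups
 * dereference the storage buffers without holding map->lock.
 */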
static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;
	u32 pages;

	if (!storage)
		return;

	map = &storage->map->map;

	bpf_cgroup_storage_calculate_size(map, &pages);
	bpf_map_uncharge_memlock(map, pages);

	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}

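/*
 * Called at attach time, once the (cgroup, attach_type) key is known.
 * The insert is expected to succeed since at most one storage can exist
 * per key, hence the WARN_ON().
 */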
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup->kn->id.id;

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list, &map->list);
	spin_unlock_bh(&map->lock);
}

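/* Remove the storage from its map when the attachment goes away. */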
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list);
	spin_unlock_bh(&map->lock);
}

#endif /* CONFIG_CGROUP_BPF */