// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spend some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an rcu grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be
 * destroyed until all bits are cleared, indicating that all outstanding flush
 * operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being removed and remove them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether its ifindex is the same as that of the net_device
 * being removed. When removing the dev, a cmpxchg() is used to ensure the
 * correct dev is removed; in the case of a concurrent update or delete
 * operation it is possible that the initially referenced dev is no longer in
 * the map. As the notifier hook walks the map we know that new dev references
 * cannot be added by the user because core infrastructure ensures
 * dev_get_by_index() calls will fail at this point.
 */
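/* Illustrative sketch (not part of this file) of the two sides described
 * above, assuming libbpf-style loading. The map name "tx_port", the section
 * names, the map_fd and the output ifindex are hypothetical.
 *
 * BPF program side, redirecting every packet to the netdev stored at key 0:
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u32),
 *		.max_entries	= 1,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 *
 * Control plane side, installing the egress device (the value is an ifindex,
 * see dev_map_update_elem() below):
 *
 *	__u32 key = 0, ifindex = 4;
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);
 */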
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct net_device *dev_rx;
	unsigned int count;
};
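/* Per destination netdev and per CPU, redirected frames are staged in an
 * xdp_bulk_queue and handed to ndo_xdp_xmit() in batches of up to
 * DEV_MAP_BULK_SIZE, either when the queue fills up (bq_enqueue()) or at
 * flush time (__dev_map_flush()). See bq_xmit_all() below.
 */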

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}
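/* Worked example (assuming a 64-bit kernel): with max_entries == 256,
 * BITS_TO_LONGS(256) == 4, so each possible CPU gets a 32 byte bitmap with
 * one bit per map slot.
 */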

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) have
	 * been disconnected from events. Wait for outstanding critical sections
	 * in these programs to complete. The rcu critical section only
	 * guarantees no further reads against netdev_map. It does __not__
	 * ensure pending flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to indicate that all flush_needed bits are zero on
	 * _all_ cpus. Because the above synchronize_rcu() ensures the map is
	 * disconnected from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		free_percpu(dev->bulkq);
		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
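/* A minimal sketch of walking a devmap from user space with libbpf; the
 * map_fd is hypothetical. A missing or out-of-range key restarts the walk at
 * index 0, and -ENOENT marks the end of the map:
 *
 *	__u32 key = -1, next, ifindex;
 *
 *	while (bpf_map_get_next_key(map_fd, &key, &next) == 0) {
 *		if (bpf_map_lookup_elem(map_fd, &next, &ifindex) == 0)
 *			printf("slot %u -> ifindex %u\n", next, ifindex);
 *		key = next;
 *	}
 */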

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		       struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	rcu_read_lock();
	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);

		__clear_bit(bit, bitmap);
	}
	rcu_read_unlock();
}
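/* The driver side contract described above, as an illustrative sketch only:
 * "mydrv" and its rx-clean helper are hypothetical; xdp_do_flush_map() is the
 * real kernel API that ends up here.
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done;
 *
 *		done = mydrv_clean_rx_irq(napi, budget); // may redirect into a devmap
 *		xdp_do_flush_map(); // push out anything still bulked in devmaps
 *
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */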

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put won't happen until after reading
 * the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frames in the bulk
	 * queue, because the bq is stored per-CPU and must be flushed from
	 * the net_device driver's NAPI handler before it returns.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;
	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		unsigned long *bitmap;
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
		}
		rcu_read_unlock();
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened yet,
	 * because the driver side rcu_read_lock/unlock only protects the
	 * running XDP program. However, for pending flush operations the dev
	 * and ctx are stored in another per-cpu map. Additionally, the driver
	 * tear down ensures all soft irqs are complete before removing the
	 * net device once its reference count drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
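/* From user space the value written is simply the target ifindex; writing
 * ifindex 0 clears the slot, and BPF_NOEXIST is rejected above because a
 * devmap slot always "exists". A minimal sketch, with a hypothetical map_fd
 * and interface name:
 *
 *	__u32 key = 0, ifindex = if_nametoindex("eth1");
 *
 *	if (bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY))
 *		perror("bpf_map_update_elem");
 */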

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);