1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3  */
4 
5 /* The devmap's primary use is as a backend map for the XDP BPF helper call
6  * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
7  * spent some effort to ensure the datapath with redirect maps does not use
8  * any locking. This is a quick note on the details.
9  *
10  * We have three possible paths to get into the devmap control plane: bpf
11  * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf syscall
12  * will invoke an update, delete, or lookup operation. To ensure updates and
13  * deletes appear atomic from the datapath side, xchg() is used to modify the
14  * netdev_map array. Then, because the datapath does a lookup into the netdev_map
15  * array (read-only) from an RCU critical section we use call_rcu() to wait for
16  * an rcu grace period before freeing the old data structures. This ensures the
17  * datapath always has a valid copy. However, the datapath does a "flush"
18  * operation that pushes any pending packets in the driver outside the RCU
19  * critical section. Each bpf_dtab_netdev tracks these pending operations using
20  * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
21  * this list is empty, indicating outstanding flush operations have completed.
22  *
23  * BPF syscalls may race with BPF program calls on any of the update, delete
24  * or lookup operations. As noted above, the xchg() operation also keeps the
25  * netdev_map consistent in this case. From the devmap side BPF programs
26  * calling into these operations are the same as multiple user space threads
27  * making system calls.
28  *
29  * Finally, any of the above may race with a netdev_unregister notifier. The
30  * unregister notifier must search for net devices in the map structure that
31  * contain a reference to the net device and remove them. This is a two step
32  * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
33  * check to see if the ifindex is the same as the net_device being removed.
34  * When removing the dev, a cmpxchg() is used to ensure the correct dev is
35  * removed; in the case of a concurrent update or delete operation it is
36  * possible that the initially referenced dev is no longer in the map. As the
37  * notifier hook walks the map we know that new dev references cannot be
38  * added by the user because core infrastructure ensures dev_get_by_index()
39  * calls will fail at this point.
40  *
41  * The devmap_hash type is a map type which interprets keys as ifindexes and
42  * indexes these using a hashmap. This allows maps that use ifindex as key to be
43  * densely packed instead of having holes in the lookup array for unused
44  * ifindexes. The setup and packet enqueue/send code is shared between the two
45  * types of devmap; only the lookup and insertion is different.
46  */
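
/* A minimal sketch of the BPF program side, for orientation only (this is
 * not part of this file; the map name and sizes below are illustrative).
 * An XDP program declares a devmap and redirects into it:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// redirect to the device at key 0; on recent kernels the
 *		// third argument is the action returned if the lookup fails
 *		return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *	}
 */
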
47 #include <linux/bpf.h>
48 #include <net/xdp.h>
49 #include <linux/filter.h>
50 #include <trace/events/xdp.h>
51 #include <linux/btf_ids.h>
52 
53 #define DEV_CREATE_FLAG_MASK \
54 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
55 
56 struct xdp_dev_bulk_queue {
57 	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
58 	struct list_head flush_node;
59 	struct net_device *dev;
60 	struct net_device *dev_rx;
61 	struct bpf_prog *xdp_prog;
62 	unsigned int count;
63 };
64 
65 struct bpf_dtab_netdev {
66 	struct net_device *dev; /* must be first member, due to tracepoint */
67 	struct hlist_node index_hlist;
68 	struct bpf_prog *xdp_prog;
69 	struct rcu_head rcu;
70 	unsigned int idx;
71 	struct bpf_devmap_val val;
72 };
73 
74 struct bpf_dtab {
75 	struct bpf_map map;
76 	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
77 	struct list_head list;
78 
79 	/* these are only used for DEVMAP_HASH type maps */
80 	struct hlist_head *dev_index_head;
81 	spinlock_t index_lock;
82 	unsigned int items;
83 	u32 n_buckets;
84 };
85 
86 static DEFINE_PER_CPU(struct list_head, dev_flush_list);
87 static DEFINE_SPINLOCK(dev_map_lock);
88 static LIST_HEAD(dev_map_list);
89 
90 static struct hlist_head *dev_map_create_hash(unsigned int entries,
91 					      int numa_node)
92 {
93 	int i;
94 	struct hlist_head *hash;
95 
96 	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
97 	if (hash != NULL)
98 		for (i = 0; i < entries; i++)
99 			INIT_HLIST_HEAD(&hash[i]);
100 
101 	return hash;
102 }
103 
104 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
105 						    int idx)
106 {
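	/* n_buckets is always a power of two, so masking with n_buckets - 1
	 * reduces idx modulo the table size (e.g. idx 10 with 8 buckets
	 * selects bucket 2).
	 */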
107 	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
108 }
109 
110 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
111 {
112 	u32 valsize = attr->value_size;
113 
114 	/* check sanity of attributes. 2 value sizes supported:
115 	 * 4 bytes: ifindex
116 	 * 8 bytes: ifindex + prog fd
117 	 */
118 	if (attr->max_entries == 0 || attr->key_size != 4 ||
119 	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
120 	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
121 	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
122 		return -EINVAL;
123 
124 	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
125 	 * verifier prevents writes from the BPF side
126 	 */
127 	attr->map_flags |= BPF_F_RDONLY_PROG;
128 
129 
130 	bpf_map_init_from_attr(&dtab->map, attr);
131 
132 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
133 		/* hash table size must be power of 2; roundup_pow_of_two() can
134 		 * overflow into UB on 32-bit arches, so check that first
135 		 */
136 		if (dtab->map.max_entries > 1UL << 31)
137 			return -EINVAL;
138 
139 		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
140 
141 		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
142 							   dtab->map.numa_node);
143 		if (!dtab->dev_index_head)
144 			return -ENOMEM;
145 
146 		spin_lock_init(&dtab->index_lock);
147 	} else {
148 		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
149 						      sizeof(struct bpf_dtab_netdev *),
150 						      dtab->map.numa_node);
151 		if (!dtab->netdev_map)
152 			return -ENOMEM;
153 	}
154 
155 	return 0;
156 }
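
/* Userspace sketch (illustrative, assuming libbpf): creating a devmap whose
 * values carry both an ifindex and a program fd, i.e. the 8-byte value_size
 * accepted above. "tx_ports" is an assumed name.
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_ports",
 *				    sizeof(__u32),
 *				    sizeof(struct bpf_devmap_val),
 *				    64, NULL);
 */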
157 
158 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
159 {
160 	struct bpf_dtab *dtab;
161 	int err;
162 
163 	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
164 	if (!dtab)
165 		return ERR_PTR(-ENOMEM);
166 
167 	err = dev_map_init_map(dtab, attr);
168 	if (err) {
169 		bpf_map_area_free(dtab);
170 		return ERR_PTR(err);
171 	}
172 
173 	spin_lock(&dev_map_lock);
174 	list_add_tail_rcu(&dtab->list, &dev_map_list);
175 	spin_unlock(&dev_map_lock);
176 
177 	return &dtab->map;
178 }
179 
180 static void dev_map_free(struct bpf_map *map)
181 {
182 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
183 	int i;
184 
185 	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
186 	 * so the programs (there can be more than one that used this map) have
187 	 * been disconnected from events. The following synchronize_rcu() guarantees
188 	 * that both rcu read critical sections have completed and waits for
189 	 * preempt-disable regions (NAPI being the relevant context here) so we
190 	 * are certain there will be no further reads against the netdev_map and
191 	 * all flush operations are complete. Flush operations can only be done
192 	 * from NAPI context for this reason.
193 	 */
194 
195 	spin_lock(&dev_map_lock);
196 	list_del_rcu(&dtab->list);
197 	spin_unlock(&dev_map_lock);
198 
199 	bpf_clear_redirect_map(map);
200 	synchronize_rcu();
201 
202 	/* Make sure prior __dev_map_entry_free() calls have completed. */
203 	rcu_barrier();
204 
205 	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
206 		for (i = 0; i < dtab->n_buckets; i++) {
207 			struct bpf_dtab_netdev *dev;
208 			struct hlist_head *head;
209 			struct hlist_node *next;
210 
211 			head = dev_map_index_hash(dtab, i);
212 
213 			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
214 				hlist_del_rcu(&dev->index_hlist);
215 				if (dev->xdp_prog)
216 					bpf_prog_put(dev->xdp_prog);
217 				dev_put(dev->dev);
218 				kfree(dev);
219 			}
220 		}
221 
222 		bpf_map_area_free(dtab->dev_index_head);
223 	} else {
224 		for (i = 0; i < dtab->map.max_entries; i++) {
225 			struct bpf_dtab_netdev *dev;
226 
227 			dev = rcu_dereference_raw(dtab->netdev_map[i]);
228 			if (!dev)
229 				continue;
230 
231 			if (dev->xdp_prog)
232 				bpf_prog_put(dev->xdp_prog);
233 			dev_put(dev->dev);
234 			kfree(dev);
235 		}
236 
237 		bpf_map_area_free(dtab->netdev_map);
238 	}
239 
240 	bpf_map_area_free(dtab);
241 }
242 
243 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
244 {
245 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
246 	u32 index = key ? *(u32 *)key : U32_MAX;
247 	u32 *next = next_key;
248 
249 	if (index >= dtab->map.max_entries) {
250 		*next = 0;
251 		return 0;
252 	}
253 
254 	if (index == dtab->map.max_entries - 1)
255 		return -ENOENT;
256 	*next = index + 1;
257 	return 0;
258 }
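
/* Userspace sketch (illustrative): iterating all keys with libbpf's syscall
 * wrapper. Passing NULL (or an out-of-range key, as handled above) restarts
 * the walk at index 0.
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *
 *	while (!err) {
 *		key = next;
 *		// ... use key ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */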
259 
260 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
261  * by local_bh_disable() (from XDP calls inside NAPI). The
262  * rcu_read_lock_bh_held() below makes lockdep accept both.
263  */
264 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
265 {
266 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
267 	struct hlist_head *head = dev_map_index_hash(dtab, key);
268 	struct bpf_dtab_netdev *dev;
269 
270 	hlist_for_each_entry_rcu(dev, head, index_hlist,
271 				 lockdep_is_held(&dtab->index_lock))
272 		if (dev->idx == key)
273 			return dev;
274 
275 	return NULL;
276 }
277 
278 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
279 				    void *next_key)
280 {
281 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
282 	u32 idx, *next = next_key;
283 	struct bpf_dtab_netdev *dev, *next_dev;
284 	struct hlist_head *head;
285 	int i = 0;
286 
287 	if (!key)
288 		goto find_first;
289 
290 	idx = *(u32 *)key;
291 
292 	dev = __dev_map_hash_lookup_elem(map, idx);
293 	if (!dev)
294 		goto find_first;
295 
296 	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
297 				    struct bpf_dtab_netdev, index_hlist);
298 
299 	if (next_dev) {
300 		*next = next_dev->idx;
301 		return 0;
302 	}
303 
304 	i = idx & (dtab->n_buckets - 1);
305 	i++;
306 
307  find_first:
308 	for (; i < dtab->n_buckets; i++) {
309 		head = dev_map_index_hash(dtab, i);
310 
311 		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
312 					    struct bpf_dtab_netdev,
313 					    index_hlist);
314 		if (next_dev) {
315 			*next = next_dev->idx;
316 			return 0;
317 		}
318 	}
319 
320 	return -ENOENT;
321 }
322 
323 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
324 				struct xdp_frame **frames, int n,
325 				struct net_device *tx_dev,
326 				struct net_device *rx_dev)
327 {
328 	struct xdp_txq_info txq = { .dev = tx_dev };
329 	struct xdp_rxq_info rxq = { .dev = rx_dev };
330 	struct xdp_buff xdp;
331 	int i, nframes = 0;
332 
333 	for (i = 0; i < n; i++) {
334 		struct xdp_frame *xdpf = frames[i];
335 		u32 act;
336 		int err;
337 
338 		xdp_convert_frame_to_buff(xdpf, &xdp);
339 		xdp.txq = &txq;
340 		xdp.rxq = &rxq;
341 
342 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
343 		switch (act) {
344 		case XDP_PASS:
345 			err = xdp_update_frame_from_buff(&xdp, xdpf);
346 			if (unlikely(err < 0))
347 				xdp_return_frame_rx_napi(xdpf);
348 			else
349 				frames[nframes++] = xdpf;
350 			break;
351 		default:
352 			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
353 			fallthrough;
354 		case XDP_ABORTED:
355 			trace_xdp_exception(tx_dev, xdp_prog, act);
356 			fallthrough;
357 		case XDP_DROP:
358 			xdp_return_frame_rx_napi(xdpf);
359 			break;
360 		}
361 	}
362 	return nframes; /* sent frames count */
363 }
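
/* Sketch of a program this function would run (illustrative): it must be
 * loaded with expected_attach_type BPF_XDP_DEVMAP, which libbpf infers from
 * the section name below. Only XDP_PASS lets a frame continue on to
 * ndo_xdp_xmit().
 *
 *	SEC("xdp/devmap")
 *	int xdp_devmap_prog(struct xdp_md *ctx)
 *	{
 *		// ctx->egress_ifindex identifies the tx_dev seen above
 *		return XDP_PASS;
 *	}
 */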
364 
365 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
366 {
367 	struct net_device *dev = bq->dev;
368 	unsigned int cnt = bq->count;
369 	int sent = 0, err = 0;
370 	int to_send = cnt;
371 	int i;
372 
373 	if (unlikely(!cnt))
374 		return;
375 
376 	for (i = 0; i < cnt; i++) {
377 		struct xdp_frame *xdpf = bq->q[i];
378 
379 		prefetch(xdpf);
380 	}
381 
382 	if (bq->xdp_prog) {
383 		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
384 		if (!to_send)
385 			goto out;
386 	}
387 
388 	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
389 	if (sent < 0) {
390 		/* If ndo_xdp_xmit fails with an errno, no frames have
391 		 * been xmit'ed.
392 		 */
393 		err = sent;
394 		sent = 0;
395 	}
396 
397 	/* If not all frames have been transmitted, it is our
398 	 * responsibility to free them
399 	 */
400 	for (i = sent; unlikely(i < to_send); i++)
401 		xdp_return_frame_rx_napi(bq->q[i]);
402 
403 out:
404 	bq->count = 0;
405 	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
406 }
407 
408 /* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
409  * driver before returning from its napi->poll() routine. See the comment above
410  * xdp_do_flush() in filter.c.
411  */
412 void __dev_flush(void)
413 {
414 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
415 	struct xdp_dev_bulk_queue *bq, *tmp;
416 
417 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
418 		bq_xmit_all(bq, XDP_XMIT_FLUSH);
419 		bq->dev_rx = NULL;
420 		bq->xdp_prog = NULL;
421 		__list_del_clearprev(&bq->flush_node);
422 	}
423 }
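
/* Driver-side sketch (illustrative; the mydrv_* names are hypothetical): a
 * NAPI poll routine that may have redirected frames must flush the per-CPU
 * queues before returning.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx_irq(napi, budget); // may bq_enqueue()
 *
 *		xdp_do_flush(); // ends up in __dev_flush() on this CPU
 *		return work;
 *	}
 */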
424 
425 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
426  * by local_bh_disable() (from XDP calls inside NAPI). The
427  * rcu_read_lock_bh_held() below makes lockdep accept both.
428  */
429 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
430 {
431 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
432 	struct bpf_dtab_netdev *obj;
433 
434 	if (key >= map->max_entries)
435 		return NULL;
436 
437 	obj = rcu_dereference_check(dtab->netdev_map[key],
438 				    rcu_read_lock_bh_held());
439 	return obj;
440 }
441 
442 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu
443  * variable access is safe and map elements stick around. See comment above
444  * xdp_do_flush() in filter.c.
445  */
446 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
447 		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
448 {
449 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
450 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
451 
452 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
453 		bq_xmit_all(bq, 0);
454 
455 	/* Ingress dev_rx will be the same for all xdp_frames in the
456 	 * bulk_queue, because the bq is stored per-CPU and must be flushed
457 	 * from the net_device driver's NAPI func at the end.
458 	 *
459 	 * Do the same with xdp_prog and flush_list since these fields
460 	 * are only ever modified together.
461 	 */
462 	if (!bq->dev_rx) {
463 		bq->dev_rx = dev_rx;
464 		bq->xdp_prog = xdp_prog;
465 		list_add(&bq->flush_node, flush_list);
466 	}
467 
468 	bq->q[bq->count++] = xdpf;
469 }
470 
471 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
472 				struct net_device *dev_rx,
473 				struct bpf_prog *xdp_prog)
474 {
475 	int err;
476 
477 	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
478 		return -EOPNOTSUPP;
479 
480 	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
481 		     xdp_frame_has_frags(xdpf)))
482 		return -EOPNOTSUPP;
483 
484 	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
485 	if (unlikely(err))
486 		return err;
487 
488 	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
489 	return 0;
490 }
491 
492 static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
493 {
494 	struct xdp_txq_info txq = { .dev = dst->dev };
495 	struct xdp_buff xdp;
496 	u32 act;
497 
498 	if (!dst->xdp_prog)
499 		return XDP_PASS;
500 
501 	__skb_pull(skb, skb->mac_len);
502 	xdp.txq = &txq;
503 
504 	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
505 	switch (act) {
506 	case XDP_PASS:
507 		__skb_push(skb, skb->mac_len);
508 		break;
509 	default:
510 		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
511 		fallthrough;
512 	case XDP_ABORTED:
513 		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
514 		fallthrough;
515 	case XDP_DROP:
516 		kfree_skb(skb);
517 		break;
518 	}
519 
520 	return act;
521 }
522 
523 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
524 		    struct net_device *dev_rx)
525 {
526 	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
527 }
528 
529 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
530 		    struct net_device *dev_rx)
531 {
532 	struct net_device *dev = dst->dev;
533 
534 	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
535 }
536 
537 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
538 {
539 	if (!obj)
540 		return false;
541 
542 	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
543 		return false;
544 
545 	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
546 		     xdp_frame_has_frags(xdpf)))
547 		return false;
548 
549 	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
550 		return false;
551 
552 	return true;
553 }
554 
555 static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
556 				 struct net_device *dev_rx,
557 				 struct xdp_frame *xdpf)
558 {
559 	struct xdp_frame *nxdpf;
560 
561 	nxdpf = xdpf_clone(xdpf);
562 	if (!nxdpf)
563 		return -ENOMEM;
564 
565 	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
566 
567 	return 0;
568 }
569 
570 static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
571 {
572 	while (num_excluded--) {
573 		if (ifindex == excluded[num_excluded])
574 			return true;
575 	}
576 	return false;
577 }
578 
579 /* Get ifindex of each upper device. 'indexes' must be able to hold at
580  * least MAX_NEST_DEV elements.
581  * Returns the number of ifindexes added.
582  */
583 static int get_upper_ifindexes(struct net_device *dev, int *indexes)
584 {
585 	struct net_device *upper;
586 	struct list_head *iter;
587 	int n = 0;
588 
589 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
590 		indexes[n++] = upper->ifindex;
591 	}
592 	return n;
593 }
594 
595 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
596 			  struct bpf_map *map, bool exclude_ingress)
597 {
598 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
599 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
600 	int excluded_devices[1+MAX_NEST_DEV];
601 	struct hlist_head *head;
602 	int num_excluded = 0;
603 	unsigned int i;
604 	int err;
605 
606 	if (exclude_ingress) {
607 		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
608 		excluded_devices[num_excluded++] = dev_rx->ifindex;
609 	}
610 
611 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
612 		for (i = 0; i < map->max_entries; i++) {
613 			dst = rcu_dereference_check(dtab->netdev_map[i],
614 						    rcu_read_lock_bh_held());
615 			if (!is_valid_dst(dst, xdpf))
616 				continue;
617 
618 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
619 				continue;
620 
621 			/* we only need n-1 clones; last_dst enqueued below */
622 			if (!last_dst) {
623 				last_dst = dst;
624 				continue;
625 			}
626 
627 			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
628 			if (err)
629 				return err;
630 
631 			last_dst = dst;
632 		}
633 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
634 		for (i = 0; i < dtab->n_buckets; i++) {
635 			head = dev_map_index_hash(dtab, i);
636 			hlist_for_each_entry_rcu(dst, head, index_hlist,
637 						 lockdep_is_held(&dtab->index_lock)) {
638 				if (!is_valid_dst(dst, xdpf))
639 					continue;
640 
641 				if (is_ifindex_excluded(excluded_devices, num_excluded,
642 							dst->dev->ifindex))
643 					continue;
644 
645 				/* we only need n-1 clones; last_dst enqueued below */
646 				if (!last_dst) {
647 					last_dst = dst;
648 					continue;
649 				}
650 
651 				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
652 				if (err)
653 					return err;
654 
655 				last_dst = dst;
656 			}
657 		}
658 	}
659 
660 	/* consume the last copy of the frame */
661 	if (last_dst)
662 		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
663 	else
664 		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
665 
666 	return 0;
667 }
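
/* BPF-side sketch (illustrative): the broadcast path above is taken when the
 * redirecting program passes BPF_F_BROADCAST, in which case the key is
 * ignored and every valid map entry receives a clone:
 *
 *	return bpf_redirect_map(&tx_ports, 0,
 *				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 */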
668 
669 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
670 			     struct bpf_prog *xdp_prog)
671 {
672 	int err;
673 
674 	err = xdp_ok_fwd_dev(dst->dev, skb->len);
675 	if (unlikely(err))
676 		return err;
677 
678 	/* Redirect has already succeeded semantically at this point, so we just
679 	 * return 0 even if the packet is dropped. The helper below takes care of
680 	 * freeing the skb.
681 	 */
682 	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
683 		return 0;
684 
685 	skb->dev = dst->dev;
686 	generic_xdp_tx(skb, xdp_prog);
687 
688 	return 0;
689 }
690 
691 static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
692 				  struct sk_buff *skb,
693 				  struct bpf_prog *xdp_prog)
694 {
695 	struct sk_buff *nskb;
696 	int err;
697 
698 	nskb = skb_clone(skb, GFP_ATOMIC);
699 	if (!nskb)
700 		return -ENOMEM;
701 
702 	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
703 	if (unlikely(err)) {
704 		consume_skb(nskb);
705 		return err;
706 	}
707 
708 	return 0;
709 }
710 
711 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
712 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
713 			   bool exclude_ingress)
714 {
715 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
716 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
717 	int excluded_devices[1+MAX_NEST_DEV];
718 	struct hlist_head *head;
719 	struct hlist_node *next;
720 	int num_excluded = 0;
721 	unsigned int i;
722 	int err;
723 
724 	if (exclude_ingress) {
725 		num_excluded = get_upper_ifindexes(dev, excluded_devices);
726 		excluded_devices[num_excluded++] = dev->ifindex;
727 	}
728 
729 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
730 		for (i = 0; i < map->max_entries; i++) {
731 			dst = rcu_dereference_check(dtab->netdev_map[i],
732 						    rcu_read_lock_bh_held());
733 			if (!dst)
734 				continue;
735 
736 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
737 				continue;
738 
739 			/* we only need n-1 clones; last_dst enqueued below */
740 			if (!last_dst) {
741 				last_dst = dst;
742 				continue;
743 			}
744 
745 			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
746 			if (err)
747 				return err;
748 
749 			last_dst = dst;
750 
751 		}
752 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
753 		for (i = 0; i < dtab->n_buckets; i++) {
754 			head = dev_map_index_hash(dtab, i);
755 			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
756 				if (!dst)
757 					continue;
758 
759 				if (is_ifindex_excluded(excluded_devices, num_excluded,
760 							dst->dev->ifindex))
761 					continue;
762 
763 				/* we only need n-1 clones; last_dst enqueued below */
764 				if (!last_dst) {
765 					last_dst = dst;
766 					continue;
767 				}
768 
769 				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
770 				if (err)
771 					return err;
772 
773 				last_dst = dst;
774 			}
775 		}
776 	}
777 
778 	/* consume the first skb and return */
779 	if (last_dst)
780 		return dev_map_generic_redirect(last_dst, skb, xdp_prog);
781 
782 	/* dtab is empty */
783 	consume_skb(skb);
784 	return 0;
785 }
786 
787 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
788 {
789 	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
790 
791 	return obj ? &obj->val : NULL;
792 }
793 
794 static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
795 {
796 	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
797 								*(u32 *)key);
798 	return obj ? &obj->val : NULL;
799 }
800 
801 static void __dev_map_entry_free(struct rcu_head *rcu)
802 {
803 	struct bpf_dtab_netdev *dev;
804 
805 	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
806 	if (dev->xdp_prog)
807 		bpf_prog_put(dev->xdp_prog);
808 	dev_put(dev->dev);
809 	kfree(dev);
810 }
811 
812 static long dev_map_delete_elem(struct bpf_map *map, void *key)
813 {
814 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
815 	struct bpf_dtab_netdev *old_dev;
816 	int k = *(u32 *)key;
817 
818 	if (k >= map->max_entries)
819 		return -EINVAL;
820 
821 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
822 	if (old_dev) {
823 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
824 		atomic_dec((atomic_t *)&dtab->items);
825 	}
826 	return 0;
827 }
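
/* Userspace sketch (illustrative): clearing a slot. For the array-based
 * devmap this succeeds for any in-range key, even if the slot was already
 * empty.
 *
 *	__u32 key = 0;
 *	int err = bpf_map_delete_elem(map_fd, &key);
 */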
828 
829 static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
830 {
831 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
832 	struct bpf_dtab_netdev *old_dev;
833 	int k = *(u32 *)key;
834 	unsigned long flags;
835 	int ret = -ENOENT;
836 
837 	spin_lock_irqsave(&dtab->index_lock, flags);
838 
839 	old_dev = __dev_map_hash_lookup_elem(map, k);
840 	if (old_dev) {
841 		dtab->items--;
842 		hlist_del_init_rcu(&old_dev->index_hlist);
843 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
844 		ret = 0;
845 	}
846 	spin_unlock_irqrestore(&dtab->index_lock, flags);
847 
848 	return ret;
849 }
850 
851 static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
852 						    struct bpf_dtab *dtab,
853 						    struct bpf_devmap_val *val,
854 						    unsigned int idx)
855 {
856 	struct bpf_prog *prog = NULL;
857 	struct bpf_dtab_netdev *dev;
858 
859 	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
860 				   GFP_NOWAIT | __GFP_NOWARN,
861 				   dtab->map.numa_node);
862 	if (!dev)
863 		return ERR_PTR(-ENOMEM);
864 
865 	dev->dev = dev_get_by_index(net, val->ifindex);
866 	if (!dev->dev)
867 		goto err_out;
868 
869 	if (val->bpf_prog.fd > 0) {
870 		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
871 					     BPF_PROG_TYPE_XDP, false);
872 		if (IS_ERR(prog))
873 			goto err_put_dev;
874 		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
875 		    !bpf_prog_map_compatible(&dtab->map, prog))
876 			goto err_put_prog;
877 	}
878 
879 	dev->idx = idx;
880 	if (prog) {
881 		dev->xdp_prog = prog;
882 		dev->val.bpf_prog.id = prog->aux->id;
883 	} else {
884 		dev->xdp_prog = NULL;
885 		dev->val.bpf_prog.id = 0;
886 	}
887 	dev->val.ifindex = val->ifindex;
888 
889 	return dev;
890 err_put_prog:
891 	bpf_prog_put(prog);
892 err_put_dev:
893 	dev_put(dev->dev);
894 err_out:
895 	kfree(dev);
896 	return ERR_PTR(-EINVAL);
897 }
898 
899 static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
900 				  void *key, void *value, u64 map_flags)
901 {
902 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
903 	struct bpf_dtab_netdev *dev, *old_dev;
904 	struct bpf_devmap_val val = {};
905 	u32 i = *(u32 *)key;
906 
907 	if (unlikely(map_flags > BPF_EXIST))
908 		return -EINVAL;
909 	if (unlikely(i >= dtab->map.max_entries))
910 		return -E2BIG;
911 	if (unlikely(map_flags == BPF_NOEXIST))
912 		return -EEXIST;
913 
914 	/* already verified value_size <= sizeof val */
915 	memcpy(&val, value, map->value_size);
916 
917 	if (!val.ifindex) {
918 		dev = NULL;
919 		/* cannot specify fd if ifindex is 0 */
920 		if (val.bpf_prog.fd > 0)
921 			return -EINVAL;
922 	} else {
923 		dev = __dev_map_alloc_node(net, dtab, &val, i);
924 		if (IS_ERR(dev))
925 			return PTR_ERR(dev);
926 	}
927 
928 	/* Use call_rcu() here to ensure rcu critical sections have completed.
929 	 * Remember that the driver-side flush operation will happen before the
930 	 * net device is removed.
931 	 */
932 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
933 	if (old_dev)
934 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
935 	else
936 		atomic_inc((atomic_t *)&dtab->items);
937 
938 	return 0;
939 }
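
/* Userspace sketch (illustrative): populating a slot with an egress device
 * and a per-entry devmap program fd. Note that BPF_NOEXIST is rejected above
 * because the array slot always exists.
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = if_nametoindex("eth0"),
 *		.bpf_prog.fd = devmap_prog_fd,
 *	};
 *	__u32 key = 0;
 *	int err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */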
940 
941 static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
942 				u64 map_flags)
943 {
944 	return __dev_map_update_elem(current->nsproxy->net_ns,
945 				     map, key, value, map_flags);
946 }
947 
948 static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
949 				       void *key, void *value, u64 map_flags)
950 {
951 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
952 	struct bpf_dtab_netdev *dev, *old_dev;
953 	struct bpf_devmap_val val = {};
954 	u32 idx = *(u32 *)key;
955 	unsigned long flags;
956 	int err = -EEXIST;
957 
958 	/* already verified value_size <= sizeof val */
959 	memcpy(&val, value, map->value_size);
960 
961 	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
962 		return -EINVAL;
963 
964 	spin_lock_irqsave(&dtab->index_lock, flags);
965 
966 	old_dev = __dev_map_hash_lookup_elem(map, idx);
967 	if (old_dev && (map_flags & BPF_NOEXIST))
968 		goto out_err;
969 
970 	dev = __dev_map_alloc_node(net, dtab, &val, idx);
971 	if (IS_ERR(dev)) {
972 		err = PTR_ERR(dev);
973 		goto out_err;
974 	}
975 
976 	if (old_dev) {
977 		hlist_del_rcu(&old_dev->index_hlist);
978 	} else {
979 		if (dtab->items >= dtab->map.max_entries) {
980 			spin_unlock_irqrestore(&dtab->index_lock, flags);
981 			call_rcu(&dev->rcu, __dev_map_entry_free);
982 			return -E2BIG;
983 		}
984 		dtab->items++;
985 	}
986 
987 	hlist_add_head_rcu(&dev->index_hlist,
988 			   dev_map_index_hash(dtab, idx));
989 	spin_unlock_irqrestore(&dtab->index_lock, flags);
990 
991 	if (old_dev)
992 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
993 
994 	return 0;
995 
996 out_err:
997 	spin_unlock_irqrestore(&dtab->index_lock, flags);
998 	return err;
999 }
1000 
1001 static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
1002 				     u64 map_flags)
1003 {
1004 	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
1005 					 map, key, value, map_flags);
1006 }
1007 
1008 static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1009 {
1010 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1011 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1012 				      __dev_map_lookup_elem);
1013 }
1014 
1015 static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1016 {
1017 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1018 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1019 				      __dev_map_hash_lookup_elem);
1020 }
1021 
1022 static u64 dev_map_mem_usage(const struct bpf_map *map)
1023 {
1024 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
1025 	u64 usage = sizeof(struct bpf_dtab);
1026 
1027 	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
1028 		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
1029 	else
1030 		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
1031 	usage += atomic_read((atomic_t *)&dtab->items) *
1032 			 (u64)sizeof(struct bpf_dtab_netdev);
1033 	return usage;
1034 }
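
/* Worked example (illustrative): a BPF_MAP_TYPE_DEVMAP_HASH with
 * n_buckets == 64 and 10 populated entries reports
 * sizeof(struct bpf_dtab) + 64 * sizeof(struct hlist_head) +
 * 10 * sizeof(struct bpf_dtab_netdev).
 */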
1035 
1036 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1037 const struct bpf_map_ops dev_map_ops = {
1038 	.map_meta_equal = bpf_map_meta_equal,
1039 	.map_alloc = dev_map_alloc,
1040 	.map_free = dev_map_free,
1041 	.map_get_next_key = dev_map_get_next_key,
1042 	.map_lookup_elem = dev_map_lookup_elem,
1043 	.map_update_elem = dev_map_update_elem,
1044 	.map_delete_elem = dev_map_delete_elem,
1045 	.map_check_btf = map_check_no_btf,
1046 	.map_mem_usage = dev_map_mem_usage,
1047 	.map_btf_id = &dev_map_btf_ids[0],
1048 	.map_redirect = dev_map_redirect,
1049 };
1050 
1051 const struct bpf_map_ops dev_map_hash_ops = {
1052 	.map_meta_equal = bpf_map_meta_equal,
1053 	.map_alloc = dev_map_alloc,
1054 	.map_free = dev_map_free,
1055 	.map_get_next_key = dev_map_hash_get_next_key,
1056 	.map_lookup_elem = dev_map_hash_lookup_elem,
1057 	.map_update_elem = dev_map_hash_update_elem,
1058 	.map_delete_elem = dev_map_hash_delete_elem,
1059 	.map_check_btf = map_check_no_btf,
1060 	.map_mem_usage = dev_map_mem_usage,
1061 	.map_btf_id = &dev_map_btf_ids[0],
1062 	.map_redirect = dev_hash_map_redirect,
1063 };
1064 
1065 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1066 				       struct net_device *netdev)
1067 {
1068 	unsigned long flags;
1069 	u32 i;
1070 
1071 	spin_lock_irqsave(&dtab->index_lock, flags);
1072 	for (i = 0; i < dtab->n_buckets; i++) {
1073 		struct bpf_dtab_netdev *dev;
1074 		struct hlist_head *head;
1075 		struct hlist_node *next;
1076 
1077 		head = dev_map_index_hash(dtab, i);
1078 
1079 		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1080 			if (netdev != dev->dev)
1081 				continue;
1082 
1083 			dtab->items--;
1084 			hlist_del_rcu(&dev->index_hlist);
1085 			call_rcu(&dev->rcu, __dev_map_entry_free);
1086 		}
1087 	}
1088 	spin_unlock_irqrestore(&dtab->index_lock, flags);
1089 }
1090 
1091 static int dev_map_notification(struct notifier_block *notifier,
1092 				ulong event, void *ptr)
1093 {
1094 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1095 	struct bpf_dtab *dtab;
1096 	int i, cpu;
1097 
1098 	switch (event) {
1099 	case NETDEV_REGISTER:
1100 		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1101 			break;
1102 
1103 		/* will be freed in free_netdev() */
1104 		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1105 		if (!netdev->xdp_bulkq)
1106 			return NOTIFY_BAD;
1107 
1108 		for_each_possible_cpu(cpu)
1109 			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1110 		break;
1111 	case NETDEV_UNREGISTER:
1112 		/* This rcu_read_lock/unlock pair is needed because
1113 		 * dev_map_list is an RCU list AND to ensure a delete
1114 		 * operation does not free a netdev_map entry while we
1115 		 * are comparing it against the netdev being unregistered.
1116 		 */
1117 		rcu_read_lock();
1118 		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1119 			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1120 				dev_map_hash_remove_netdev(dtab, netdev);
1121 				continue;
1122 			}
1123 
1124 			for (i = 0; i < dtab->map.max_entries; i++) {
1125 				struct bpf_dtab_netdev *dev, *odev;
1126 
1127 				dev = rcu_dereference(dtab->netdev_map[i]);
1128 				if (!dev || netdev != dev->dev)
1129 					continue;
1130 				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1131 				if (dev == odev) {
1132 					call_rcu(&dev->rcu,
1133 						 __dev_map_entry_free);
1134 					atomic_dec((atomic_t *)&dtab->items);
1135 				}
1136 			}
1137 		}
1138 		rcu_read_unlock();
1139 		break;
1140 	default:
1141 		break;
1142 	}
1143 	return NOTIFY_OK;
1144 }
1145 
1146 static struct notifier_block dev_map_notifier = {
1147 	.notifier_call = dev_map_notification,
1148 };
1149 
1150 static int __init dev_map_init(void)
1151 {
1152 	int cpu;
1153 
1154 	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1155 	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1156 		     offsetof(struct _bpf_dtab_netdev, dev));
1157 	register_netdevice_notifier(&dev_map_notifier);
1158 
1159 	for_each_possible_cpu(cpu)
1160 		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1161 	return 0;
1162 }
1163 
1164 subsys_initcall(dev_map_init);
1165