// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * have spent some effort to ensure the datapath with redirect maps does not
 * use any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an RCU grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
 * until this list is empty, indicating that all outstanding flush operations
 * have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed. This is a two step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether its ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map, we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion differ.
 */
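
/* For illustration, a minimal XDP program using a devmap as a redirect
 * backend might look like the hedged, libbpf-style sketch below. The map
 * name "tx_map" and its size are illustrative assumptions, not part of
 * this file:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 8);
 *	} tx_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// Redirect every packet out the device stored at key 0
 *		return bpf_redirect_map(&tx_map, 0, 0);
 *	}
 */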
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. Two value sizes are supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

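/* From user space, a devmap of either flavor is created through the regular
 * bpf(BPF_MAP_CREATE, ...) path, which lands in dev_map_alloc() below. A
 * hedged sketch using libbpf's bpf_map_create() (the map name and sizes are
 * illustrative only):
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_map",
 *				    sizeof(__u32),	// key: array index
 *				    sizeof(__u32),	// value: ifindex
 *				    8, NULL);
 *
 * Passing an 8-byte value (struct bpf_devmap_val including bpf_prog.fd)
 * instead selects the second value layout checked above.
 */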
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		bpf_map_area_free(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. The following synchronize_rcu()
	 * guarantees that both rcu read critical sections are complete and
	 * waits for preempt-disable regions (NAPI being the relevant context
	 * here), so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	bpf_map_area_free(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

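/* User space can walk either map flavor with the usual get-next-key loop; a
 * hedged sketch using libbpf (the map_fd variable is assumed to come from
 * map creation or pinning):
 *
 *	__u32 key, next_key;
 *	struct bpf_devmap_val val;
 *	int err;
 *
 *	err = bpf_map_get_next_key(map_fd, NULL, &next_key);
 *	while (!err) {
 *		key = next_key;
 *		if (!bpf_map_lookup_elem(map_fd, &key, &val))
 *			printf("slot %u -> ifindex %u\n", key, val.ifindex);
 *		err = bpf_map_get_next_key(map_fd, &key, &next_key);
 *	}
 */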
/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been transmitted.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush(), which _must_ be invoked by the
 * driver before returning from its napi->poll() routine. See the comment
 * above xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}

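/* A hedged sketch of where a driver is expected to call xdp_do_flush(); the
 * driver name and its helper function below are hypothetical:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work;
 *
 *		// RX processing may call xdp_do_redirect(), which fills the
 *		// per-cpu bulk queues drained by __dev_flush().
 *		work = mydrv_clean_rx_irq(napi, budget);
 *
 *		xdp_do_flush();	// drain pending redirects before returning
 *
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */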
/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu variable
 * access is safe, and map elements stick around. See the comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 *
	 * Do the same with xdp_prog and flush_list, since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return -EOPNOTSUPP;

	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj)
		return false;

	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return false;

	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1 + MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;

	/* Redirect has already succeeded semantically at this point, so we
	 * just return 0 even if the packet is dropped. The helper below takes
	 * care of freeing the skb.
	 */
	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
		return 0;

	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1 + MAX_NEST_DEV];
	struct hlist_head *head;
	struct hlist_node *next;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev, excluded_devices);
		excluded_devices[num_excluded++] = dev->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!dst)
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (!dst)
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								*(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static long dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
	if (old_dev) {
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		atomic_dec((atomic_t *)&dtab->items);
	}
	return 0;
}

static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_NOWAIT | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
		    !bpf_prog_map_compatible(&dtab->map, prog))
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
				  void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember, the driver-side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	else
		atomic_inc((atomic_t *)&dtab->items);

	return 0;
}

static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

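/* A hedged user space sketch of populating a devmap slot, optionally with a
 * per-entry program (map_fd, prog_fd and the key/ifindex values are
 * illustrative):
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = 4,		// egress device
 *		.bpf_prog.fd = prog_fd,	// program with expected_attach_type
 *					// BPF_XDP_DEVMAP, or 0 for none
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *
 * With a 4-byte value size, only the ifindex is passed instead.
 */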
static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				       void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

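/* From the BPF program side, the two flags accepted above enable
 * multicast-style forwarding. A hedged sketch (the map name "tx_map" is an
 * assumption):
 *
 *	SEC("xdp")
 *	int xdp_broadcast_example(struct xdp_md *ctx)
 *	{
 *		// Clone the frame to every device in tx_map except the one
 *		// it arrived on (and that device's upper devices).
 *		return bpf_redirect_map(&tx_map, 0,
 *					BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *	}
 *
 * With BPF_F_BROADCAST set, the key argument is ignored and the frame is
 * handled by dev_map_enqueue_multi() above.
 */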
static u64 dev_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u64 usage = sizeof(struct bpf_dtab);

	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
	else
		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
	usage += atomic_read((atomic_t *)&dtab->items) *
			 (u64)sizeof(struct bpf_dtab_netdev);
	return usage;
}

BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_map_redirect,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev) {
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
					atomic_dec((atomic_t *)&dtab->items);
				}
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);