// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 * an rcu grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two-step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references can not be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */

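/* For orientation, a minimal BPF-program-side sketch of the primary use case
 * described above. This is not part of this file; the map name, max_entries
 * value and section name are illustrative assumptions only:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);	// 4-byte value: ifindex only
 *	} tx_devs SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// Redirect to the netdev stored at key 0; with flags == 0 an
 *		// empty slot makes the helper return XDP_ABORTED.
 *		return bpf_redirect_map(&tx_devs, 0, 0);
 *	}
 */
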
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

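/* A user-space sketch of the two value layouts accepted above (not part of
 * this file; map_fd, ifindex and prog_fd are placeholders). With a 4-byte
 * value only the ifindex is stored; with the full struct bpf_devmap_val an
 * XDP program loaded with expected_attach_type BPF_XDP_DEVMAP can also be
 * attached to the entry and will run on frames redirected to it:
 *
 *	__u32 key = 0;
 *	struct bpf_devmap_val val = {
 *		.ifindex = ifindex,		// target net_device
 *		.bpf_prog.fd = prog_fd,		// optional; <= 0 means none
 *	};
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */
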
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * that rcu read critical sections have completed and also waits for
	 * preempt-disable regions (NAPI being the relevant context here), so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}

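/* The run-from-devmap case above executes a second XDP program on each frame
 * before transmission. A hedged BPF-side sketch of such a program (the
 * section name understood by libbpf is an assumption here; what the kernel
 * enforces is expected_attach_type == BPF_XDP_DEVMAP, see
 * __dev_map_alloc_node()):
 *
 *	SEC("xdp/devmap")
 *	int xdp_egress_filter(struct xdp_md *ctx)
 *	{
 *		// ctx->egress_ifindex identifies the target device here
 *		return XDP_PASS;	// XDP_DROP/XDP_ABORTED free the frame
 *	}
 */
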
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}

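/* A hedged sketch of the driver-side contract __dev_flush() relies on (the
 * driver and function names are made up). Any driver that triggers devmap
 * redirects from its poll loop ends it with xdp_do_flush(), which ultimately
 * drains the per-CPU dev_flush_list via __dev_flush():
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = example_clean_rx(napi, budget); // may redirect frames
 *
 *		xdp_do_flush();
 *		return done;
 *	}
 */
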
/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 * variable access, and map elements stick around. See comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdpf->len);
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj ||
	    !obj->dev->netdev_ops->ndo_xdp_xmit)
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdpf->len))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}

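/* The multicast path above is what a BPF program reaches with the broadcast
 * flags. A hedged one-line sketch (the map name is a placeholder): with
 * BPF_F_BROADCAST the key is ignored and the frame is cloned to every device
 * in the map, and BPF_F_EXCLUDE_INGRESS additionally skips the ingress device
 * and its upper devices:
 *
 *	return bpf_redirect_map(&tx_devs, 0,
 *				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 */
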
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;

	/* Redirect has already succeeded semantically at this point, so we just
	 * return 0 even if packet is dropped. Helper below takes care of
	 * freeing skb.
	 */
	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
		return 0;

	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	struct hlist_node *next;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev, excluded_devices);
		excluded_devices[num_excluded++] = dev->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!dst)
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (!dst)
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
	.map_redirect = dev_map_redirect,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);