// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section, we use call_rcu() to wait for
 * an RCU grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being unregistered and remove them. This is a
 * two step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check to see if the ifindex is the same as the net_device being
 * removed. When removing the dev a cmpxchg() is used to ensure the correct dev
 * is removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references can not be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
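/* Illustrative sketch (not part of this file, not compiled here): a minimal
 * XDP program that uses a DEVMAP as the target of bpf_redirect_map(). The map
 * name "tx_ports", the key value and the max_entries are assumptions for the
 * example; the program would also need <linux/bpf.h> and <bpf/bpf_helpers.h>.
 * bpf_redirect_map() returns XDP_REDIRECT on a successful lookup and the
 * frame is then enqueued through the code in this file.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));	(4 byte ifindex format)
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_devmap(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;
 *
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 */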
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}
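/* Illustrative sketch (not part of this file): creating a DEVMAP from user
 * space and installing an entry in the 8 byte value format checked above,
 * i.e. an ifindex plus the fd of an XDP program loaded with
 * expected_attach_type BPF_XDP_DEVMAP. Assumes a recent libbpf that provides
 * bpf_map_create()/bpf_map_update_elem(); the variable names ifindex,
 * devmap_prog_fd and map_fd are placeholders for the example.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf.h>
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_ports",
 *				    sizeof(__u32),
 *				    sizeof(struct bpf_devmap_val), 64, NULL);
 *	struct bpf_devmap_val val = {
 *		.ifindex = ifindex,
 *		.bpf_prog.fd = devmap_prog_fd,
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, 0);
 */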
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. The following synchronize_rcu()
	 * guarantees that both RCU read critical sections complete and waits
	 * for preempt-disable regions (NAPI being the relevant context here),
	 * so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}
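/* Illustrative sketch (not part of this file): walking a devmap from user
 * space with the get_next_key/lookup callbacks implemented above. For the
 * array-type devmap an out-of-range (or NULL) key restarts iteration at slot
 * 0 and -ENOENT is only returned at the last slot; empty slots show up as a
 * failed lookup. The libbpf wrappers and the map_fd variable are assumptions.
 *
 *	__u32 key, next_key;
 *	struct bpf_devmap_val val;
 *	__u32 *prev = NULL;
 *
 *	while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
 *		if (bpf_map_lookup_elem(map_fd, &next_key, &val) == 0)
 *			printf("slot %u -> ifindex %u\n", next_key, val.ifindex);
 *		key = next_key;
 *		prev = &key;
 *	}
 */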
bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled from the
 * driver before returning from its napi->poll() routine. The poll() routine
 * is called either from busy_poll context or net_rx_action signaled from
 * NET_RX_SOFTIRQ. Either way the poll routine must complete before the net
 * device can be torn down. On devmap tear down we ensure the flush list is
 * empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}
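/* Illustrative sketch (not part of this file): the contract described above,
 * seen from a driver's NAPI poll routine. Any frames queued through
 * bq_enqueue() while processing this poll cycle are pushed out by
 * xdp_do_flush() before poll() returns. Apart from xdp_do_flush() and
 * napi_complete_done(), the function and variable names are assumptions.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done;
 *
 *		// RX cleaning may run the XDP program and call
 *		// xdp_do_redirect(), which fills the per-CPU bulk queues.
 *		work_done = example_clean_rx_irq(napi, budget);
 *
 *		xdp_do_flush();		// drains dev_flush_list via __dev_flush()
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */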
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put won't happen until after reading
 * the ifindex.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp,
			 int exclude_ifindex)
{
	if (!obj || obj->dev->ifindex == exclude_ifindex ||
	    !obj->dev->netdev_ops->ndo_xdp_xmit)
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int exclude_ifindex = exclude_ingress ? dev_rx->ifindex : 0;
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	struct hlist_head *head;
	struct xdp_frame *xdpf;
	unsigned int i;
	int err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = READ_ONCE(dtab->netdev_map[i]);
			if (!is_valid_dst(dst, xdp, exclude_ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdp, exclude_ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}
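/* Illustrative sketch (not part of this file): the BPF program side of the
 * broadcast path serviced by dev_map_enqueue_multi() above. With
 * BPF_F_BROADCAST the key argument is ignored and a clone of the frame is
 * sent to every device in the map; BPF_F_EXCLUDE_INGRESS additionally skips
 * the receiving device. The map name "forward_map" is an assumption.
 *
 *	SEC("xdp")
 *	int xdp_broadcast(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&forward_map, 0,
 *					BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *	}
 */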
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int exclude_ifindex = exclude_ingress ? dev->ifindex : 0;
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	struct hlist_head *head;
	struct hlist_node *next;
	unsigned int i;
	int err;

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = READ_ONCE(dtab->netdev_map[i]);
			if (!dst || dst->dev->ifindex == exclude_ifindex)
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (!dst || dst->dev->ifindex == exclude_ifindex)
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu
	 * will wait for the preempt-disable region to complete (NAPI in
	 * this context). Additionally, the driver tear down ensures all
	 * soft irqs are complete before removing the net device when
	 * dev_put() drops the last reference.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}
static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}
static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
	.map_redirect = dev_map_redirect,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
	.map_redirect = dev_hash_map_redirect,
};
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);