// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver-side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section, we use call_rcu() to wait
 * for an RCU grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside
 * the RCU critical section. Each bpf_dtab_netdev tracks these pending
 * operations using a per-cpu flush list. The bpf_dtab_netdev object will not
 * be destroyed until this list is empty, indicating that all outstanding
 * flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and delete them. This is a two
 * step process: (a) dereference the bpf_dtab_netdev object in netdev_map and
 * (b) check whether its ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; with a concurrent update or delete operation it is possible that
 * the initially referenced dev is no longer in the map. While the notifier
 * hook walks the map, we know that new dev references cannot be added by
 * user space because core infrastructure ensures dev_get_by_index() calls
 * will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only the lookup and insertion is different.
 */
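/* For illustration only (not part of this file): a minimal sketch of the
 * typical BPF-program-side use of a devmap described above — an XDP program
 * that redirects packets through a BPF_MAP_TYPE_DEVMAP via
 * bpf_redirect_map(). Map and program names are hypothetical and the snippet
 * assumes libbpf's BTF-defined map syntax.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__type(key, __u32);
 *		__type(value, __u32);		// 4-byte value: ifindex only
 *		__uint(max_entries, 64);
 *	} tx_port SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_devmap(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// slot holding the egress device
 *
 *		// Returns XDP_REDIRECT when the entry exists; with flags == 0
 *		// an empty slot falls back to an error action.
 *		return bpf_redirect_map(&tx_port, key, 0);
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */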
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

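/* To illustrate the two value sizes accepted by dev_map_init_map() above, a
 * minimal user space sketch (hypothetical, not part of this file) that
 * creates a DEVMAP with the 8-byte value and installs an entry carrying both
 * an egress ifindex and the fd of a BPF_XDP_DEVMAP program. It assumes a
 * recent libbpf (bpf_map_create()) and that egress_ifindex and
 * devmap_prog_fd already exist.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf.h>
 *
 *	__u32 key = 0;
 *	struct bpf_devmap_val val = {
 *		.ifindex = egress_ifindex,	// device to redirect to
 *		.bpf_prog.fd = devmap_prog_fd,	// program run on redirected frames
 *	};
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_port",
 *				sizeof(__u32), sizeof(struct bpf_devmap_val),
 *				64, NULL);
 *	if (map_fd < 0 ||
 *	    bpf_map_update_elem(map_fd, &key, &val, BPF_ANY) < 0) {
 *		// handle error
 *	}
 */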
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. The following synchronize_rcu()
	 * guarantees that both rcu read critical sections are complete and
	 * waits for preempt-disable regions (NAPI being the relevant context
	 * here), so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
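/* The get_next_key semantics above drive the usual user space iteration
 * pattern; a minimal sketch (hypothetical, assumes libbpf, <stdio.h>, and an
 * already-created 4-byte-value DEVMAP fd in map_fd):
 *
 *	__u32 key, next_key, ifindex;
 *	int err;
 *
 *	// A NULL key asks for the first slot; dev_map_get_next_key() treats
 *	// any out-of-range index (including U32_MAX) the same way.
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next_key);
 *	     !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next_key)) {
 *		key = next_key;
 *		// Empty slots make lookup fail with -ENOENT, so only
 *		// populated entries are printed.
 *		if (!bpf_map_lookup_elem(map_fd, &key, &ifindex))
 *			printf("slot %u -> ifindex %u\n", key, ifindex);
 *	}
 *	// The loop terminates when get_next_key returns -ENOENT at the
 *	// last slot.
 */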
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}
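/* bq_xmit_all() above relies on the ndo_xdp_xmit() contract: the driver
 * returns the number of frames it transmitted, or a negative errno if it
 * could send none, and XDP_XMIT_FLUSH asks it to kick the TX ring
 * immediately. A rough driver-side skeleton (hypothetical foo_* names,
 * invented purely for illustration) of that contract:
 *
 *	static int foo_xdp_xmit(struct net_device *dev, int n,
 *				struct xdp_frame **frames, u32 flags)
 *	{
 *		struct foo_tx_ring *ring = foo_pick_tx_ring(dev);
 *		int i, sent = 0;
 *
 *		if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 *			return -EINVAL;
 *
 *		for (i = 0; i < n; i++) {
 *			if (!foo_xmit_xdp_frame(ring, frames[i])) {
 *				sent++;
 *			} else {
 *				// Convention here: frames the driver cannot
 *				// queue are freed by the driver and simply
 *				// not counted in the return value.
 *				xdp_return_frame(frames[i]);
 *			}
 *		}
 *
 *		if (flags & XDP_XMIT_FLUSH)
 *			foo_tx_kick(ring);	// write the TX doorbell now
 *
 *		return sent;	// bq_xmit_all() counts n - sent as drops
 *	}
 */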
/* __dev_flush is called from xdp_do_flush(), which the driver _must_ call
 * before returning from its napi->poll() routine. The poll() routine is
 * called either from busy_poll context or from net_rx_action signaled by
 * NET_RX_SOFTIRQ. Either way the poll routine must complete before the net
 * device can be torn down. On devmap tear down we ensure the flush list is
 * empty before completing, so that all flush operations have finished. When
 * drivers update the bpf program they may need to ensure any flush ops are
 * also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put() won't happen until
 * after reading the ifindex.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI handler before it returns.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	bq_enqueue(dev, xdpf, dev_rx);
	return 0;
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}
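/* To make the flush contract above concrete, a rough sketch (hypothetical
 * foo_* driver names, not part of this file) of how a driver's napi->poll()
 * cooperates with this code: frames redirected via xdp_do_redirect() land in
 * the per-CPU bulk queues filled by bq_enqueue(), and the final
 * xdp_do_flush() call drains them through bq_xmit_all() before poll()
 * returns.
 *
 *	static int foo_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_rx_ring *ring = container_of(napi, struct foo_rx_ring, napi);
 *		struct xdp_buff xdp;
 *		bool redirected = false;
 *		int done = 0;
 *
 *		while (done < budget && foo_rx_frame(ring, &xdp)) {
 *			switch (bpf_prog_run_xdp(ring->xdp_prog, &xdp)) {
 *			case XDP_REDIRECT:
 *				if (!xdp_do_redirect(ring->netdev, &xdp, ring->xdp_prog))
 *					redirected = true;
 *				break;
 *			// ... other verdicts elided ...
 *			}
 *			done++;
 *		}
 *
 *		if (redirected)
 *			xdp_do_flush();	// must run before poll() returns
 *
 *		return done;
 *	}
 */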
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure that any RCU critical sections have
	 * completed, as well as any flush operations, because call_rcu()
	 * waits for the preempt-disable region (NAPI in this context) to
	 * complete. Additionally, driver tear down ensures all softirqs are
	 * complete before the net device is removed once its refcount from
	 * dev_put() reaches zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}
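/* __dev_map_alloc_node() above only accepts programs loaded with
 * expected_attach_type == BPF_XDP_DEVMAP. For illustration, a minimal sketch
 * of such a per-entry program (hypothetical; the exact SEC() name libbpf
 * maps to BPF_XDP_DEVMAP depends on the libbpf version, e.g. "xdp_devmap" in
 * older releases or "xdp/devmap" in newer ones; assumes <linux/if_ether.h>
 * and <bpf/bpf_helpers.h>):
 *
 *	SEC("xdp_devmap")
 *	int xdp_devmap_prog(struct xdp_md *ctx)
 *	{
 *		// Runs on the frame after redirect, in the context of the
 *		// egress device selected by the map entry.
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		// Drop anything without a full Ethernet header.
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;
 *
 *		return XDP_PASS;	// hand the frame to the egress driver
 *	}
 *
 * Its fd is what user space places in bpf_devmap_val.bpf_prog.fd when
 * updating the map (see the update sketch earlier in this file).
 */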
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure RCU critical sections have completed,
	 * remembering that the driver-side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
	.map_redirect = dev_map_redirect,
};
static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);