// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side xchg() is used to modify the
 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 * an rcu grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
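/* For illustration only, a minimal sketch (not part of this file) of how an
 * XDP program typically uses a devmap from the BPF side with the
 * bpf_redirect_map() helper described above; the map name "tx_ports", its
 * sizing, and the fixed key are hypothetical:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;
 *
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 *
 * On a successful lookup the helper returns XDP_REDIRECT and the frame is
 * enqueued on this CPU's bulk queue for the target device.
 */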
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}
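/* For illustration only, a user space sketch (not part of this file) that
 * creates a DEVMAP and populates one slot through libbpf's syscall wrappers.
 * It requires CAP_NET_ADMIN per dev_map_alloc() above; the ifindex value is
 * hypothetical and error handling is elided:
 *
 *	int fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
 *				sizeof(struct bpf_devmap_val), 64, 0);
 *	struct bpf_devmap_val val = { .ifindex = 4 };
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);
 *
 * A 4-byte value size (bare ifindex) is equally valid, matching the two
 * sizes accepted by dev_map_init_map().
 */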
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) have
	 * been disconnected from events. The following synchronize_rcu()
	 * guarantees both rcu read critical sections complete and waits for
	 * preempt-disable regions (NAPI being the relevant context here) so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}
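/* For illustration only, a user space sketch (not part of this file) of
 * walking a DEVMAP with the get_next_key cursor; "fd" is a hypothetical map
 * fd. An out-of-range start key (here U32_MAX) restarts the walk at slot 0,
 * matching dev_map_get_next_key() below, and empty slots simply fail the
 * lookup:
 *
 *	__u32 key = (__u32)-1, next;
 *	struct bpf_devmap_val val;
 *
 *	while (!bpf_map_get_next_key(fd, &key, &next)) {
 *		if (!bpf_map_lookup_elem(fd, &next, &val))
 *			printf("slot %u -> ifindex %u\n", next, val.ifindex);
 *		key = next;
 *	}
 */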
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_flush is called from xdp_do_flush() which _must_ be called by the
 * driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}
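/* For illustration only, a sketch (not part of this file) of the contract
 * above from a driver's point of view; the mydrv_* names are hypothetical:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = mydrv_clean_rx_irq(napi, budget);
 *
 *		xdp_do_flush();
 *		return done;
 *	}
 *
 * mydrv_clean_rx_irq() stands in for the RX loop that may have redirected
 * frames into a devmap; xdp_do_flush() then drains this CPU's dev_flush_list
 * via __dev_flush() before poll() returns.
 */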
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put() won't happen until after the
 * ifindex has been read.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI func.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	bq_enqueue(dev, xdpf, dev_rx);
	return 0;
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}
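/* For illustration only, a sketch (not part of this file) of the kind of
 * second-level program dev_map_run_prog() above invokes per entry. Such a
 * program must be loaded with expected_attach_type BPF_XDP_DEVMAP (with
 * libbpf a section name such as "xdp_devmap" selects this; exact naming
 * varies by libbpf version) and can inspect the egress device via
 * ctx->egress_ifindex, populated from the xdp_txq_info set up above. The
 * ifindex compared against is hypothetical:
 *
 *	SEC("xdp_devmap")
 *	int xdp_devmap_prog(struct xdp_md *ctx)
 *	{
 *		if (ctx->egress_ifindex == 4)
 *			return XDP_PASS;
 *		return XDP_DROP;
 *	}
 *
 * XDP_PASS here means "proceed with the transmit"; XDP_DROP frees the frame.
 */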
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu()
	 * will wait for the preempt-disable region (NAPI in this context)
	 * to complete. Additionally, driver tear down ensures all softirqs
	 * are complete before removing the net device once dev_put() drops
	 * the reference count to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}
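/* For illustration only, a user space sketch (not part of this file) of
 * installing an entry whose frames first traverse a BPF_XDP_DEVMAP program,
 * as handled by __dev_map_alloc_node() above; "devmap_fd", "prog_fd" and the
 * ifindex are hypothetical:
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = 4,
 *		.bpf_prog.fd = prog_fd,
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(devmap_fd, &key, &val, BPF_ANY);
 *
 * The map must have been created with the 8-byte value size, and prog_fd
 * must reference an XDP program loaded with expected_attach_type
 * BPF_XDP_DEVMAP, or the update fails with -EINVAL.
 */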
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
};
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq =
			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
					   sizeof(void *), GFP_ATOMIC);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);