// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane:
 * bpf syscalls, bpf programs, and driver side xmit/flush operations. A bpf
 * syscall will invoke an update, delete, or lookup operation. To ensure
 * updates and deletes appear atomic from the datapath side, xchg() is used
 * to modify the netdev_map array. Then because the datapath does a lookup
 * into the netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an rcu grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating that
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being removed and delete them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether the ifindex is the same as that of the net_device
 * being removed. When removing the dev, a cmpxchg() is used to ensure the
 * correct dev is removed; in the case of a concurrent update or delete
 * operation it is possible that the initially referenced dev is no longer in
 * the map. As the notifier hook walks the map, we know that new dev
 * references cannot be added by the user because core infrastructure ensures
 * dev_get_by_index() calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only the lookup and insertion is different.
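 *
 * As an illustration only (not part of this file), an XDP program that
 * redirects via a devmap typically just calls the helper with a key, e.g.
 * with a hypothetical BPF_MAP_TYPE_DEVMAP named tx_port set up by the
 * loader:
 *
 *	SEC("xdp")
 *	int xdp_redirect_devmap(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}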
 */
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

/* DEVMAP values */
struct bpf_devmap_val {
	u32 ifindex;   /* device index */
	union {
		int   fd;  /* prog fd on map write */
		u32   id;  /* prog id on map read */
	} bpf_prog;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;
	u64 cost = 0;
	int err;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
	} else {
		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
			goto free_charge;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			goto free_charge;
	}

	return 0;

free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * both rcu read critical sections complete and waits for
	 * preempt-disable regions (NAPI being the relevant context here) so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		kfree(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

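	/* Both the success and the error path report sent/dropped frame
	 * counts via the xdp_devmap_xmit tracepoint below.
	 */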
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI func at the end.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dev, xdpf, dev_rx);
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations because call_rcu
	 * will wait for preempt-disable region to complete, NAPI in this
	 * context. And additionally, the driver tear down ensures all
	 * soft irqs are complete before removing the net device in the
	 * case of dev_put equals zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
			   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd >= 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_devmap_val val = { .bpf_prog.fd = -1 };
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* cannot specify fd if ifindex is 0 */
		if (val.bpf_prog.fd != -1)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_devmap_val val = { .bpf_prog.fd = -1 };
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq =
			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
					   sizeof(void *), GFP_ATOMIC);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);