// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* Devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side xchg() is used to modify the
 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 * an rcu grace period before free'ing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references can not be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
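
/* For reference, a minimal and hypothetical XDP program on the BPF side of the
 * contract described above; illustrative only and not part of this file. The
 * map name, section names and key are placeholders, and the flags argument to
 * bpf_redirect_map() must be 0 in this kernel version.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));	// 4-byte value: ifindex only
 *		__uint(max_entries, 64);
 *	} tx_devs SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_devmap_redirect(struct xdp_md *ctx)
 *	{
 *		// slot 0 is assumed to have been populated with an egress ifindex
 *		return bpf_redirect_map(&tx_devs, 0, 0);
 *	}
 */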
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
        struct xdp_frame *q[DEV_MAP_BULK_SIZE];
        struct list_head flush_node;
        struct net_device *dev;
        struct net_device *dev_rx;
        unsigned int count;
};

struct bpf_dtab_netdev {
        struct net_device *dev; /* must be first member, due to tracepoint */
        struct hlist_node index_hlist;
        struct bpf_dtab *dtab;
        struct bpf_prog *xdp_prog;
        struct rcu_head rcu;
        unsigned int idx;
        struct bpf_devmap_val val;
};

struct bpf_dtab {
        struct bpf_map map;
        struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
        struct list_head list;

        /* these are only used for DEVMAP_HASH type maps */
        struct hlist_head *dev_index_head;
        spinlock_t index_lock;
        unsigned int items;
        u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
                                              int numa_node)
{
        int i;
        struct hlist_head *hash;

        hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
        if (hash != NULL)
                for (i = 0; i < entries; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
                                                    int idx)
{
        return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

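/* For reference, a hypothetical user-space map creation that the sanity checks
 * in dev_map_init_map() below would accept (headers, fd handling and error
 * checking omitted; values are illustrative):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_DEVMAP_HASH,
 *		.key_size    = 4,	// u32 key, typically an ifindex
 *		.value_size  = 8,	// struct bpf_devmap_val: ifindex + prog fd
 *		.max_entries = 64,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
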
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
        u32 valsize = attr->value_size;
        u64 cost = 0;
        int err;

        /* check sanity of attributes. 2 value sizes supported:
         * 4 bytes: ifindex
         * 8 bytes: ifindex + prog fd
         */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
             valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
            attr->map_flags & ~DEV_CREATE_FLAG_MASK)
                return -EINVAL;

        /* Lookup returns a pointer straight to dev->ifindex, so make sure the
         * verifier prevents writes from the BPF side
         */
        attr->map_flags |= BPF_F_RDONLY_PROG;

        bpf_map_init_from_attr(&dtab->map, attr);

        if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

                if (!dtab->n_buckets) /* Overflow check */
                        return -EINVAL;
                cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
        } else {
                cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
        }

        /* if map size is larger than memlock limit, reject it */
        err = bpf_map_charge_init(&dtab->map.memory, cost);
        if (err)
                return -EINVAL;

        if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
                                                           dtab->map.numa_node);
                if (!dtab->dev_index_head)
                        goto free_charge;

                spin_lock_init(&dtab->index_lock);
        } else {
                dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
                                                      sizeof(struct bpf_dtab_netdev *),
                                                      dtab->map.numa_node);
                if (!dtab->netdev_map)
                        goto free_charge;
        }

        return 0;

free_charge:
        bpf_map_charge_finish(&dtab->map.memory);
        return -ENOMEM;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
        struct bpf_dtab *dtab;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        dtab = kzalloc(sizeof(*dtab), GFP_USER);
        if (!dtab)
                return ERR_PTR(-ENOMEM);

        err = dev_map_init_map(dtab, attr);
        if (err) {
                kfree(dtab);
                return ERR_PTR(err);
        }

        spin_lock(&dev_map_lock);
        list_add_tail_rcu(&dtab->list, &dev_map_list);
        spin_unlock(&dev_map_lock);

        return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        int i;

        /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (can be more than one that used this map) were
         * disconnected from events. The following synchronize_rcu() guarantees
         * both rcu read critical sections complete and waits for
         * preempt-disable regions (NAPI being the relevant context here) so we
         * are certain there will be no further reads against the netdev_map and
         * all flush operations are complete. Flush operations can only be done
         * from NAPI context for this reason.
         */

        spin_lock(&dev_map_lock);
        list_del_rcu(&dtab->list);
        spin_unlock(&dev_map_lock);

        bpf_clear_redirect_map(map);
        synchronize_rcu();

        /* Make sure prior __dev_map_entry_free() callbacks have completed. */
        rcu_barrier();

        if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                for (i = 0; i < dtab->n_buckets; i++) {
                        struct bpf_dtab_netdev *dev;
                        struct hlist_head *head;
                        struct hlist_node *next;

                        head = dev_map_index_hash(dtab, i);

                        hlist_for_each_entry_safe(dev, next, head, index_hlist) {
                                hlist_del_rcu(&dev->index_hlist);
                                if (dev->xdp_prog)
                                        bpf_prog_put(dev->xdp_prog);
                                dev_put(dev->dev);
                                kfree(dev);
                        }
                }

                bpf_map_area_free(dtab->dev_index_head);
        } else {
                for (i = 0; i < dtab->map.max_entries; i++) {
                        struct bpf_dtab_netdev *dev;

                        dev = dtab->netdev_map[i];
                        if (!dev)
                                continue;

                        if (dev->xdp_prog)
                                bpf_prog_put(dev->xdp_prog);
                        dev_put(dev->dev);
                        kfree(dev);
                }

                bpf_map_area_free(dtab->netdev_map);
        }

        kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= dtab->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == dtab->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct hlist_head *head = dev_map_index_hash(dtab, key);
        struct bpf_dtab_netdev *dev;

        hlist_for_each_entry_rcu(dev, head, index_hlist,
                                 lockdep_is_held(&dtab->index_lock))
                if (dev->idx == key)
                        return dev;

        return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
                                     void *next_key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        u32 idx, *next = next_key;
        struct bpf_dtab_netdev *dev, *next_dev;
        struct hlist_head *head;
        int i = 0;

        if (!key)
                goto find_first;

        idx = *(u32 *)key;

        dev = __dev_map_hash_lookup_elem(map, idx);
        if (!dev)
                goto find_first;

        next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
                                    struct bpf_dtab_netdev, index_hlist);

        if (next_dev) {
                *next = next_dev->idx;
                return 0;
        }

        i = idx & (dtab->n_buckets - 1);
        i++;

find_first:
        for (; i < dtab->n_buckets; i++) {
                head = dev_map_index_hash(dtab, i);

                next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
                                            struct bpf_dtab_netdev,
                                            index_hlist);
                if (next_dev) {
                        *next = next_dev->idx;
                        return 0;
                }
        }

        return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
        if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
             map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
            map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
                return true;

        return false;
}

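/* Drain one per-CPU bulk queue: pass every queued frame to the egress device's
 * ndo_xdp_xmit() in a single call and report the sent/dropped counts via the
 * devmap_xmit tracepoint. Frames are only freed here when the driver call
 * fails outright (see the error path below).
 */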
static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
        struct net_device *dev = bq->dev;
        int sent = 0, drops = 0, err = 0;
        int i;

        if (unlikely(!bq->count))
                return 0;

        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];

                prefetch(xdpf);
        }

        sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
        if (sent < 0) {
                err = sent;
                sent = 0;
                goto error;
        }
        drops = bq->count - sent;
out:
        bq->count = 0;

        trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
        bq->dev_rx = NULL;
        __list_del_clearprev(&bq->flush_node);
        return 0;
error:
        /* If ndo_xdp_xmit fails with an errno, no frames have been
         * xmit'ed and it's our responsibility to free them all.
         */
        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];

                xdp_return_frame_rx_napi(xdpf);
                drops++;
        }
        goto out;
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
        struct xdp_dev_bulk_queue *bq, *tmp;

        list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
                bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

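/* For reference, a sketch of the driver-side ordering required by the comment
 * above (hypothetical driver code, not part of this file): frames queued via
 * bq_enqueue() during the poll loop are drained before poll() returns.
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = mydrv_clean_rx(napi, budget);  // may XDP_REDIRECT into a devmap
 *
 *		xdp_do_flush();		// ends up in __dev_flush() for this CPU
 *		return done;
 *	}
 */
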
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put() won't happen until after reading
 * the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *obj;

        if (key >= map->max_entries)
                return NULL;

        obj = READ_ONCE(dtab->netdev_map[key]);
        return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
                      struct net_device *dev_rx)
{
        struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
        struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

        if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
                bq_xmit_all(bq, 0);

        /* Ingress dev_rx will be the same for all xdp_frame's in
         * bulk_queue, because bq is stored per-CPU and must be flushed
         * from the net_device driver's NAPI func end.
         */
        if (!bq->dev_rx)
                bq->dev_rx = dev_rx;

        bq->q[bq->count++] = xdpf;

        if (!bq->flush_node.prev)
                list_add(&bq->flush_node, flush_list);

        return 0;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
                                struct net_device *dev_rx)
{
        struct xdp_frame *xdpf;
        int err;

        if (!dev->netdev_ops->ndo_xdp_xmit)
                return -EOPNOTSUPP;

        err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
        if (unlikely(err))
                return err;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;

        return bq_enqueue(dev, xdpf, dev_rx);
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
                                         struct xdp_buff *xdp,
                                         struct bpf_prog *xdp_prog)
{
        struct xdp_txq_info txq = { .dev = dev };
        u32 act;

        xdp_set_data_meta_invalid(xdp);
        xdp->txq = &txq;

        act = bpf_prog_run_xdp(xdp_prog, xdp);
        switch (act) {
        case XDP_PASS:
                return xdp;
        case XDP_DROP:
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(dev, xdp_prog, act);
                break;
        }

        xdp_return_buff(xdp);
        return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        struct net_device *dev = dst->dev;

        if (dst->xdp_prog) {
                xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
                if (!xdp)
                        return 0;
        }
        return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog)
{
        int err;

        err = xdp_ok_fwd_dev(dst->dev, skb->len);
        if (unlikely(err))
                return err;
        skb->dev = dst->dev;
        generic_xdp_tx(skb, xdp_prog);

        return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

        return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
                                                                 *(u32 *)key);
        return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
        struct bpf_dtab_netdev *dev;

        dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
        if (dev->xdp_prog)
                bpf_prog_put(dev->xdp_prog);
        dev_put(dev->dev);
        kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *old_dev;
        int k = *(u32 *)key;

        if (k >= map->max_entries)
                return -EINVAL;

        /* Use call_rcu() here to ensure any rcu critical sections have
         * completed as well as any flush operations, because call_rcu
         * will wait for the preempt-disable region to complete, NAPI in
         * this context. Additionally, the driver tear down ensures all
         * soft irqs are complete before removing the net device, in the
         * case where dev_put() drops the last reference.
         */
        old_dev = xchg(&dtab->netdev_map[k], NULL);
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
        return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *old_dev;
        int k = *(u32 *)key;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&dtab->index_lock, flags);

        old_dev = __dev_map_hash_lookup_elem(map, k);
        if (old_dev) {
                dtab->items--;
                hlist_del_init_rcu(&old_dev->index_hlist);
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
                ret = 0;
        }
        spin_unlock_irqrestore(&dtab->index_lock, flags);

        return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
                                                    struct bpf_dtab *dtab,
                                                    struct bpf_devmap_val *val,
                                                    unsigned int idx)
{
        struct bpf_prog *prog = NULL;
        struct bpf_dtab_netdev *dev;

        dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
                           dtab->map.numa_node);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        dev->dev = dev_get_by_index(net, val->ifindex);
        if (!dev->dev)
                goto err_out;

        if (val->bpf_prog.fd > 0) {
                prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
                                             BPF_PROG_TYPE_XDP, false);
                if (IS_ERR(prog))
                        goto err_put_dev;
                if (prog->expected_attach_type != BPF_XDP_DEVMAP)
                        goto err_put_prog;
        }

        dev->idx = idx;
        dev->dtab = dtab;
        if (prog) {
                dev->xdp_prog = prog;
                dev->val.bpf_prog.id = prog->aux->id;
        } else {
                dev->xdp_prog = NULL;
                dev->val.bpf_prog.id = 0;
        }
        dev->val.ifindex = val->ifindex;

        return dev;
err_put_prog:
        bpf_prog_put(prog);
err_put_dev:
        dev_put(dev->dev);
err_out:
        kfree(dev);
        return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *dev, *old_dev;
        struct bpf_devmap_val val = {};
        u32 i = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(i >= dtab->map.max_entries))
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;

        /* already verified value_size <= sizeof val */
        memcpy(&val, value, map->value_size);

        if (!val.ifindex) {
                dev = NULL;
                /* can not specify fd if ifindex is 0 */
                if (val.bpf_prog.fd > 0)
                        return -EINVAL;
        } else {
                dev = __dev_map_alloc_node(net, dtab, &val, i);
                if (IS_ERR(dev))
                        return PTR_ERR(dev);
        }

        /* Use call_rcu() here to ensure rcu critical sections have completed.
         * Remember the driver side flush operation will happen before the
         * net device is removed.
         */
        old_dev = xchg(&dtab->netdev_map[i], dev);
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);

        return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        return __dev_map_update_elem(current->nsproxy->net_ns,
                                     map, key, value, map_flags);
}

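/* For reference, a hypothetical user-space update (fds, key and ifindex are
 * placeholders) that installs an entry carrying a device-bound program, i.e.
 * the 8-byte struct bpf_devmap_val layout handled by __dev_map_alloc_node()
 * above:
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex     = 4,		// egress netdev
 *		.bpf_prog.fd = devmap_prog_fd,	// XDP prog loaded with
 *						// expected_attach_type == BPF_XDP_DEVMAP
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */
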
static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
                                      void *key, void *value, u64 map_flags)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *dev, *old_dev;
        struct bpf_devmap_val val = {};
        u32 idx = *(u32 *)key;
        unsigned long flags;
        int err = -EEXIST;

        /* already verified value_size <= sizeof val */
        memcpy(&val, value, map->value_size);

        if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
                return -EINVAL;

        spin_lock_irqsave(&dtab->index_lock, flags);

        old_dev = __dev_map_hash_lookup_elem(map, idx);
        if (old_dev && (map_flags & BPF_NOEXIST))
                goto out_err;

        dev = __dev_map_alloc_node(net, dtab, &val, idx);
        if (IS_ERR(dev)) {
                err = PTR_ERR(dev);
                goto out_err;
        }

        if (old_dev) {
                hlist_del_rcu(&old_dev->index_hlist);
        } else {
                if (dtab->items >= dtab->map.max_entries) {
                        spin_unlock_irqrestore(&dtab->index_lock, flags);
                        call_rcu(&dev->rcu, __dev_map_entry_free);
                        return -E2BIG;
                }
                dtab->items++;
        }

        hlist_add_head_rcu(&dev->index_hlist,
                           dev_map_index_hash(dtab, idx));
        spin_unlock_irqrestore(&dtab->index_lock, flags);

        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);

        return 0;

out_err:
        spin_unlock_irqrestore(&dtab->index_lock, flags);
        return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
                                    u64 map_flags)
{
        return __dev_map_hash_update_elem(current->nsproxy->net_ns,
                                          map, key, value, map_flags);
}

const struct bpf_map_ops dev_map_ops = {
        .map_alloc = dev_map_alloc,
        .map_free = dev_map_free,
        .map_get_next_key = dev_map_get_next_key,
        .map_lookup_elem = dev_map_lookup_elem,
        .map_update_elem = dev_map_update_elem,
        .map_delete_elem = dev_map_delete_elem,
        .map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
        .map_alloc = dev_map_alloc,
        .map_free = dev_map_free,
        .map_get_next_key = dev_map_hash_get_next_key,
        .map_lookup_elem = dev_map_hash_lookup_elem,
        .map_update_elem = dev_map_hash_update_elem,
        .map_delete_elem = dev_map_hash_delete_elem,
        .map_check_btf = map_check_no_btf,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
                                       struct net_device *netdev)
{
        unsigned long flags;
        u32 i;

        spin_lock_irqsave(&dtab->index_lock, flags);
        for (i = 0; i < dtab->n_buckets; i++) {
                struct bpf_dtab_netdev *dev;
                struct hlist_head *head;
                struct hlist_node *next;

                head = dev_map_index_hash(dtab, i);

                hlist_for_each_entry_safe(dev, next, head, index_hlist) {
                        if (netdev != dev->dev)
                                continue;

                        dtab->items--;
                        hlist_del_rcu(&dev->index_hlist);
                        call_rcu(&dev->rcu, __dev_map_entry_free);
                }
        }
        spin_unlock_irqrestore(&dtab->index_lock, flags);
}

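/* Netdevice notifier: on NETDEV_REGISTER allocate the per-CPU bulk queues used
 * by the XDP_REDIRECT fast path; on NETDEV_UNREGISTER purge every devmap entry
 * that still references the departing device.
 */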
static int dev_map_notification(struct notifier_block *notifier,
                                ulong event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct bpf_dtab *dtab;
        int i, cpu;

        switch (event) {
        case NETDEV_REGISTER:
                if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
                        break;

                /* will be freed in free_netdev() */
                netdev->xdp_bulkq =
                        __alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
                                           sizeof(void *), GFP_ATOMIC);
                if (!netdev->xdp_bulkq)
                        return NOTIFY_BAD;

                for_each_possible_cpu(cpu)
                        per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
                break;
        case NETDEV_UNREGISTER:
                /* This rcu_read_lock/unlock pair is needed because
                 * dev_map_list is an RCU list AND to ensure a delete
                 * operation does not free a netdev_map entry while we
                 * are comparing it against the netdev being unregistered.
                 */
                rcu_read_lock();
                list_for_each_entry_rcu(dtab, &dev_map_list, list) {
                        if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                                dev_map_hash_remove_netdev(dtab, netdev);
                                continue;
                        }

                        for (i = 0; i < dtab->map.max_entries; i++) {
                                struct bpf_dtab_netdev *dev, *odev;

                                dev = READ_ONCE(dtab->netdev_map[i]);
                                if (!dev || netdev != dev->dev)
                                        continue;
                                odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
                                if (dev == odev)
                                        call_rcu(&dev->rcu,
                                                 __dev_map_entry_free);
                        }
                }
                rcu_read_unlock();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
        .notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
        int cpu;

        /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
        BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
                     offsetof(struct _bpf_dtab_netdev, dev));
        register_netdevice_notifier(&dev_map_notifier);

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
        return 0;
}

subsys_initcall(dev_map_init);