// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating that all
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and remove them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether its ifindex is the same as the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it
 * is possible that the initially referenced dev is no longer in the map. As
 * the notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only lookup and insertion differ.
 */
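
/* Illustrative sketch (not part of this file; names are hypothetical): a
 * minimal XDP program redirecting packets through a devmap, written with
 * libbpf's BTF-defined map syntax. The low bits of the bpf_redirect_map()
 * flags argument select the action returned when the lookup fails.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 8);
 *	} tx_port SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_port, 0, XDP_PASS);
 *	}
 */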
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}
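
/* Illustrative userspace sketch (hypothetical names; assumes the libbpf 1.x
 * bpf_map_create() API): creating a devmap from userspace. A 4-byte value
 * holds a bare ifindex; an 8-byte value (struct bpf_devmap_val) additionally
 * carries a program fd.
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_port",
 *				    sizeof(__u32), sizeof(__u32), 8, NULL);
 */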
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. The following synchronize_rcu()
	 * guarantees that both rcu read critical sections complete and waits
	 * for preempt-disable regions (NAPI being the relevant context here),
	 * so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}
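
/* Illustrative userspace sketch (map_fd is hypothetical): walking a devmap
 * with bpf_map_get_next_key(). A NULL key, or a key at or beyond
 * max_entries, restarts iteration at index 0; -ENOENT signals the end.
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next))
 *		key = next;
 */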
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been transmitted.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them.
	 */
	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
	bq->dev_rx = NULL;
	bq->count = 0;
	__list_del_clearprev(&bq->flush_node);
}
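
/* Illustrative driver-side sketch (hypothetical driver, assuming the
 * xdp_do_flush() wrapper): the flush must run before napi->poll() returns,
 * typically at the end of the RX cleanup path.
 *
 *	static int foo_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = foo_clean_rx_irq(napi, budget);
 *
 *		xdp_do_flush();
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */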
/* __dev_flush is called from xdp_do_flush() which _must_ be called by the
 * driver before returning from its napi->poll() routine. The poll() routine
 * is called either from busy_poll context or net_rx_action signaled from
 * NET_RX_SOFTIRQ. Either way the poll routine must complete before the net
 * device can be torn down. On devmap tear down we ensure the flush list is
 * empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put() won't happen until
 * after reading the ifindex.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI func end.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	bq_enqueue(dev, xdpf, dev_rx);
	return 0;
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}
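
/* Illustrative BPF-side sketch (tx_port is hypothetical): programs may also
 * look up a devmap entry directly; the value is a read-only struct
 * bpf_devmap_val because BPF_F_RDONLY_PROG is forced at map creation.
 *
 *	__u32 key = 0;
 *	struct bpf_devmap_val *val;
 *
 *	val = bpf_map_lookup_elem(&tx_port, &key);
 *	if (val && val->ifindex)
 *		return bpf_redirect_map(&tx_port, key, 0);
 */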
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map,
							    *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu
	 * will wait for the preempt-disable region (NAPI in this
	 * context) to complete. Additionally, driver tear down ensures
	 * all soft irqs are complete before removing the net device in
	 * the case where dev_put drops the refcount to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}
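
/* Illustrative userspace sketch (map_fd, key, out_ifindex and prog_fd are
 * hypothetical): installing an entry whose 8-byte value carries a
 * BPF_XDP_DEVMAP program fd, to be run on the egress device.
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = out_ifindex,
 *		.bpf_prog.fd = prog_fd,
 *	};
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */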
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* cannot specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver-side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
	.map_redirect = dev_map_redirect,
};
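
/* Both ops tables route the bpf_redirect_map() helper through .map_redirect,
 * so the helper call can be dispatched to the map-type specific lookup above
 * rather than through a generic indirect dispatch on the map type.
 */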
static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);