// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section, we use call_rcu() to wait for
 * an rcu grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two-step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
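
/* Illustrative sketch (not part of this file): a minimal XDP program that
 * redirects into a devmap via bpf_redirect_map(). The map and program names
 * are made up for the example; the 4-byte value corresponds to the
 * ifindex-only layout accepted by dev_map_init_map() below.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));	// ifindex only
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// Redirect every frame to the device stored at key 0
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 */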

#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;
	u64 cost = 0;
	int err;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
	} else {
		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
			goto free_charge;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			goto free_charge;
	}

	return 0;

free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}
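
/* For reference, the UAPI value type whose two sizes are accepted above is
 * struct bpf_devmap_val from include/uapi/linux/bpf.h, and a hedged userspace
 * sketch of creating a devmap with the 8-byte value (so a per-entry program
 * fd can be supplied) follows. The libbpf call name reflects the API of this
 * era and may differ in newer releases.
 *
 *	struct bpf_devmap_val {
 *		__u32 ifindex;            // device index
 *		union {
 *			int   fd;         // prog fd on map write
 *			__u32 id;         // prog id on map read
 *		} bpf_prog;
 *	};
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
 *				    sizeof(struct bpf_devmap_val), 256, 0);
 */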

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * both rcu read critical sections complete and waits for
	 * preempt-disable regions (NAPI being the relevant context here) so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		kfree(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
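
/* Hedged userspace sketch of walking the keys with the iteration contract
 * implemented above: an out-of-range (or missing) previous key restarts the
 * walk at index 0 and -ENOENT marks the end. "map_fd" is assumed to refer to
 * a devmap created elsewhere.
 *
 *	__u32 key, next_key;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next_key); !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next_key)) {
 *		key = next_key;
 *		// look up / print the entry for "key" here
 *	}
 */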

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}
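
/* Hedged driver-side sketch of the contract documented in the comment below:
 * a driver that supports ndo_xdp_xmit() must flush any queued redirects at
 * the end of its NAPI poll. The driver-local names are made up.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = example_clean_rx(napi, budget); // may call xdp_do_redirect()
 *
 *		// drains the per-cpu flush list via __dev_flush()
 *		xdp_do_flush();
 *		return done;
 *	}
 */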

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here, a dev_put won't happen until after reading
 * the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dev, xdpf, dev_rx);
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}
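
/* Hedged BPF-side sketch of the per-entry program run by dev_map_run_prog()
 * above: it is loaded with expected_attach_type BPF_XDP_DEVMAP, and only
 * frames for which it returns XDP_PASS are transmitted. The section name
 * follows the libbpf convention of this era and may differ in other
 * versions; the program name is made up.
 *
 *	SEC("xdp_devmap/egress")
 *	int xdp_devmap_prog(struct xdp_md *ctx)
 *	{
 *		// ctx->egress_ifindex identifies the destination device
 *		return XDP_PASS;
 *	}
 */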

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations because call_rcu
	 * will wait for preempt-disable region to complete, NAPI in this
	 * context. And additionally, the driver tear down ensures all
	 * soft irqs are complete before removing the net device in the
	 * case of dev_put equals zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}
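
/* Hedged userspace counterpart of the delete paths above; "map_fd" is assumed
 * to exist. For DEVMAP the key is the array index, for DEVMAP_HASH it is
 * whatever index (typically an ifindex) the entry was inserted under.
 *
 *	__u32 key = 5;
 *
 *	if (bpf_map_delete_elem(map_fd, &key))
 *		perror("bpf_map_delete_elem");
 */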

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
			   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember the driver side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};
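
/* Hedged userspace sketch of populating an entry through the update paths
 * above, including an optional per-entry program fd (requires the 8-byte
 * value size). "map_fd", "prog_fd" and the ifindex are assumed to come from
 * elsewhere in the loader.
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = 4,              // destination device
 *		.bpf_prog.fd = prog_fd,    // BPF_XDP_DEVMAP program, or <= 0 for none
 *	};
 *	__u32 key = 0;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &val, BPF_ANY))
 *		perror("bpf_map_update_elem");
 */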

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq =
			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
					   sizeof(void *), GFP_ATOMIC);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);