// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * have spent some effort to ensure the datapath with redirect maps does not
 * use any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an RCU grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
 * until this list is empty, indicating outstanding flush operations have
 * completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device and remove them. This is a two-step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether the ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; with a concurrent update or delete operation it is possible that
 * the initially referenced dev is no longer in the map. As the notifier hook
 * walks the map we know that new dev references cannot be added by the user
 * because core infrastructure ensures dev_get_by_index() calls will fail at
 * this point.
 */
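/* Illustrative sketch (not part of this file): a minimal XDP program that
 * uses a devmap as the backend for bpf_redirect_map(), in the classic
 * bpf_helpers.h map-definition style. The map name, section names and sizes
 * are hypothetical; the key selects a slot whose value (an ifindex) is set
 * up from user space.
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u32),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// slot chosen by the program, e.g. per queue
 *
 *		return bpf_redirect_map(&tx_port, key, 0);
 *	}
 */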
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct bpf_dtab_netdev;

struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev_rx;
	struct bpf_dtab_netdev *obj;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	struct list_head __percpu *flush_list;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err, cpu;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += sizeof(struct list_head) * num_possible_cpus();

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	dtab->flush_list = alloc_percpu(struct list_head);
	if (!dtab->flush_list)
		goto free_charge;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_percpu;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;

free_percpu:
	free_percpu(dtab->flush_list);
free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
free_dtab:
	kfree(dtab);
	return ERR_PTR(err);
}
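/* Illustrative sketch (not part of this file): creating a devmap from user
 * space with the classic libbpf helper bpf_create_map(). Per the sanity check
 * in dev_map_alloc() above, key and value must both be 4 bytes (a u32 slot
 * index mapping to a u32 ifindex), or the map is rejected with -EINVAL.
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
 *				    sizeof(__u32), 64, 0);
 *	if (map_fd < 0)
 *		exit(1);	// e.g. -EPERM without CAP_NET_ADMIN
 */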
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one using this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush list to empty on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new items will be added.
	 */
	for_each_online_cpu(cpu) {
		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);

		while (!list_empty(flush_list))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		free_percpu(dev->bulkq);
		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_list);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
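/* Illustrative sketch (not part of this file): walking all slots from user
 * space with the libbpf helpers. A NULL key (or any out-of-range key, per the
 * U32_MAX default above) restarts iteration at slot 0; -ENOENT signals the
 * last slot.
 *
 *	__u32 key, next, ifindex;
 *
 *	if (bpf_map_get_next_key(map_fd, NULL, &next) == 0) {
 *		do {
 *			key = next;
 *			if (bpf_map_lookup_elem(map_fd, &key, &ifindex) == 0)
 *				printf("slot %u -> ifindex %u\n", key, ifindex);
 *		} while (bpf_map_get_next_key(map_fd, &key, &next) == 0);
 *	}
 */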
static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct bpf_dtab_netdev *obj = bq->obj;
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	rcu_read_lock();
	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
	rcu_read_unlock();
}
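/* Illustrative sketch (not part of this file): the contract described above,
 * from a driver's point of view. example_process_rx() is hypothetical and
 * heavily abbreviated; only xdp_do_flush_map() is the real entry point.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = example_process_rx(napi, budget);	// may call
 *								// xdp_do_redirect()
 *
 *		// Frames queued on this CPU's bulk queues by bq_enqueue()
 *		// are only guaranteed to hit ndo_xdp_xmit() once this runs:
 *		xdp_do_flush_map();
 *		return done;
 *	}
 */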
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put won't happen until after reading
 * the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI func.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
		}
		rcu_read_unlock();
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened
	 * yet, because the driver-side rcu_read_lock/unlock only protects
	 * the running XDP program. However, for pending flush operations
	 * the dev and ctx are stored in another per-cpu map. Additionally,
	 * the driver tear down ensures all soft irqs are complete before
	 * removing the net device once its refcount drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	struct xdp_bulk_queue *bq;
	u32 i = *(u32 *)key;
	int cpu;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		for_each_possible_cpu(cpu) {
			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq->obj = dev;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver-side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
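/* Illustrative sketch (not part of this file): populating and clearing slots
 * from user space with the libbpf helpers. The value is an ifindex; per the
 * check above, writing ifindex 0 stores a NULL entry, which behaves like a
 * delete. The device name is hypothetical.
 *
 *	__u32 key = 0;
 *	__u32 ifindex = if_nametoindex("eth1");
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);	// bind slot 0
 *	...
 *	bpf_map_delete_elem(map_fd, &key);	// releases the netdev reference
 */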
const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed both because
		 * dev_map_list is an RCU list and to ensure that a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);