/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using an atomic per-cpu bitmap. The
 * bpf_dtab_netdev object will not be destroyed until all bits are cleared,
 * indicating that the outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being removed and delete them. This is a two
 * step process: (a) dereference the bpf_dtab_netdev object in netdev_map and
 * (b) check whether its ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references can not be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 */
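
/* For illustration only (not part of this file, and not a definitive
 * implementation): a minimal sketch of how an XDP program might use a devmap
 * through bpf_redirect_map(). The map name, section names and the fixed key
 * below are made-up example values.
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u32),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// Redirect every frame to the device stored in slot 0.
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 */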
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}
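
/* Worked example of the cost accounting above, with illustrative numbers
 * (these values are assumptions, not taken from this file): max_entries =
 * 256 on a 64-bit system with 8 possible CPUs gives
 *
 *	netdev_map array: 256 * sizeof(struct bpf_dtab_netdev *) = 2048 bytes
 *	flush bitmaps:    BITS_TO_LONGS(256) * 8 bytes * 8 CPUs  =  256 bytes
 *	cost = 2304 bytes -> dtab->map.pages = 1 (with 4 KiB pages)
 */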
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to show all flush_needed bits as zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		       struct xdp_bulk_queue *bq)
{
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;
		struct net_device *netdev;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq);
		netdev = dev->dev;
		if (likely(netdev->netdev_ops->ndo_xdp_flush))
			netdev->netdev_ops->ndo_xdp_flush(netdev);
	}
}
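
/* For illustration only: a simplified, hypothetical driver poll routine
 * showing where the flush described above is expected to happen. All
 * example_* names and the priv fields are made up for the sketch;
 * bpf_prog_run_xdp(), xdp_do_redirect() and xdp_do_flush_map() are the real
 * entry points that end up in dev_map_enqueue()/__dev_map_flush().
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv = example_priv_of(napi);
 *		struct xdp_buff xdp;
 *		int work = 0;
 *		u32 act;
 *
 *		while (work < budget && example_fill_xdp_buff(priv, &xdp)) {
 *			act = bpf_prog_run_xdp(priv->xdp_prog, &xdp);
 *			if (act == XDP_REDIRECT)
 *				xdp_do_redirect(priv->netdev, &xdp,
 *						priv->xdp_prog);
 *			work++;
 *		}
 *
 *		// Drain this CPU's devmap bulk queues before poll returns.
 *		xdp_do_flush_map();
 *		return work;
 *	}
 */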
/* Holding rcu_read_lock() (from syscall and BPF contexts) ensures that if a
 * delete and/or update happens in parallel here, a dev_put() won't happen
 * until after reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI func end.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;
	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}
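
/* For illustration only: a condensed sketch of how the core redirect path is
 * expected to drive the datapath helpers above for one redirected frame.
 * This is a simplification under that assumption, not a verbatim copy of the
 * actual caller in net/core/filter.c.
 *
 *	struct bpf_dtab_netdev *dst;
 *
 *	dst = __dev_map_lookup_elem(map, index);	// RCU-protected lookup
 *	if (dst && !dev_map_enqueue(dst, xdp, dev_rx))
 *		__dev_map_insert_ctx(map, index);	// mark this CPU's flush bit
 *	...
 *	__dev_map_flush(map);				// later, at napi->poll() end
 */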
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_flush) {
		struct net_device *fl = dev->dev;
		struct xdp_bulk_queue *bq;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(dev, bq);

			fl->netdev_ops->ndo_xdp_flush(dev->dev);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened
	 * yet, because the driver side rcu_read_lock/unlock only protects
	 * the running XDP program. However, for pending flush operations
	 * the dev and ctx are stored in another per cpu map. Additionally,
	 * the driver tear down ensures all soft irqs are complete before
	 * removing the net device in the case dev_put() drops the refcount
	 * to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering the driver side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev ||
				    dev->dev->ifindex != netdev->ifindex)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);
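
/* For illustration only: a hypothetical user-space snippet (the libbpf-style
 * wrapper names are an assumption, not something this file defines) showing
 * the control-plane side of the update semantics implemented above, where
 * the key is a slot index and the value is an ifindex.
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
 *				    sizeof(__u32), 64, 0);
 *	__u32 key = 0, ifindex = if_nametoindex("eth1");
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);	// install eth1 in slot 0
 *
 *	ifindex = 0;
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);	// ifindex 0 clears the slot
 */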