// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}
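
/* Illustrative sketch (not part of this file): how a zero-copy driver is
 * expected to use the need_wakeup helpers above. The surrounding function
 * and the refill helper are hypothetical driver code; only the
 * xsk_uses_need_wakeup() and xsk_{set,clear}_rx_need_wakeup() calls are
 * the API exported here.
 *
 *	static void mydrv_refill_rx(struct mydrv_ring *rx)
 *	{
 *		struct xsk_buff_pool *pool = rx->xsk_pool;
 *		int filled = mydrv_alloc_rx_buffers(rx);	// hypothetical
 *
 *		if (!xsk_uses_need_wakeup(pool))
 *			return;
 *
 *		if (!filled)
 *			// The fill queue ran dry: ask user space to produce
 *			// entries and kick us via poll()/recvmsg().
 *			xsk_set_rx_need_wakeup(pool);
 *		else
 *			xsk_clear_rx_need_wakeup(pool);
 *	}
 */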

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
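
/* Worked example for xp_get_handle() above; the numbers are illustrative.
 * Assume the buffer was allocated at orig_addr = 0x12000 in the umem, the
 * pool headroom is 256 bytes and the XDP program left data 32 bytes past
 * data_hard_start, so offset = 32 + 256 = 0x120.
 *
 *	aligned mode:	handle = 0x12000 + 0x120 = 0x12120
 *	unaligned mode:	handle = 0x12000 | (0x120 << XSK_UNALIGNED_BUF_OFFSET_SHIFT)
 *
 * With XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48, the base address stays in the
 * lower 48 bits of the Rx descriptor and user space recovers it with
 * addr & XSK_UNALIGNED_BUF_ADDR_MASK, treating the upper bits as the
 * offset into the chunk.
 */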

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
					u32 max_entries)
{
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}
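
/* Illustrative sketch (not part of this file): the driver side of the
 * peek/release/completed contract above. The mydrv_* names are
 * hypothetical; zero-copy drivers implement their *_xmit_zc() routines
 * roughly along these lines.
 *
 *	static void mydrv_xmit_zc(struct mydrv_ring *tx, unsigned int budget)
 *	{
 *		struct xsk_buff_pool *pool = tx->xsk_pool;
 *		struct xdp_desc desc;
 *
 *		while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *			dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *			xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *			mydrv_post_tx_desc(tx, dma, desc.len);	// hypothetical
 *		}
 *
 *		xsk_tx_release(pool);	// let Tx ring producers make progress
 *	}
 *
 * and, once the NIC reports that n frames have been sent,
 *
 *	xsk_tx_completed(pool, n);	// publish n entries to the completion queue
 */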

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
				   u32 max_entries)
{
	struct xdp_sock *xs;
	u32 nb_pkts;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, descs, max_entries);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
	if (!nb_pkts)
		goto out;

	xskq_cons_release_n(xs->tx, nb_pkts);
	__xskq_cons_release(xs->tx);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return __xsk_sendmsg(sk);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
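
/* Illustrative sketch (not part of this file): the user-space half of the
 * need_wakeup protocol served by xsk_sendmsg()/xsk_recvmsg()/xsk_poll()
 * above. tx_ring_flags points at the flags word of the mmapped Tx ring
 * (located via XDP_MMAP_OFFSETS); the helper name is hypothetical.
 *
 *	static void kick_tx_if_needed(int xsk_fd, const __u32 *tx_ring_flags)
 *	{
 *		// Only kick the kernel when it asked for it; otherwise the
 *		// driver is still processing the Tx ring on its own.
 *		if (*tx_ring_flags & XDP_RING_NEED_WAKEUP)
 *			sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *	}
 *
 * Blocking sends are rejected above with -EOPNOTSUPP, so MSG_DONTWAIT is
 * mandatory.
 */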

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
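
/* Illustrative sketch (not part of this file): binding an AF_XDP socket
 * from user space against the checks in xsk_bind() above. The interface
 * name and queue number are example values.
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,	// optionally XDP_COPY or XDP_ZEROCOPY
 *	};
 *
 *	if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
 *		err(1, "bind");
 *
 * For XDP_SHARED_UMEM, sxdp_flags may carry only that flag and
 * sxdp_shared_umem_fd names the already-bound socket whose umem (or
 * buffer pool) is to be shared.
 */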

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
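
/* Illustrative sketch (not part of this file): the setsockopt() sequence
 * user space drives against the handler above. Sizes are example values;
 * the umem area must be page aligned and ring sizes must be powers of two
 * (enforced by xsk_init_queue()).
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,	// e.g. obtained from mmap()
 *		.len = 4096 * 4096,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int ring_sz = 2048;
 *
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz));
 *
 * All of this must happen while the socket is still in XSK_READY state,
 * i.e. before bind().
 */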

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
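
/* Illustrative sketch (not part of this file): how user space maps the Rx
 * descriptor ring served by xsk_mmap() above. The Tx, fill and completion
 * rings follow the same pattern with their page offsets (XDP_PGOFF_TX_RING,
 * XDP_UMEM_PGOFF_FILL_RING, XDP_UMEM_PGOFF_COMPLETION_RING) and entry types
 * (struct xdp_desc for Rx/Tx, __u64 addresses for fill/completion).
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *	void *map;
 *
 *	getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	map = mmap(NULL, off.rx.desc + rx_ring_sz * sizeof(struct xdp_desc),
 *		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		   xsk_fd, XDP_PGOFF_RX_RING);
 *
 * off.rx.producer, off.rx.consumer and off.rx.flags are byte offsets into
 * that mapping for the shared producer/consumer indices and the
 * need_wakeup flags word.
 */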

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family = PF_XDP,
	.owner = THIS_MODULE,
	.release = xsk_release,
	.bind = xsk_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = xsk_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = xsk_setsockopt,
	.getsockopt = xsk_getsockopt,
	.sendmsg = xsk_sendmsg,
	.recvmsg = xsk_recvmsg,
	.mmap = xsk_mmap,
	.sendpage = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);