// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}
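/* A hedged sketch of how a zero-copy driver is expected to use the
 * need_wakeup helpers above from its NAPI poll loop; the driver-local
 * names (my_napi_poll, my_fill_queue_is_empty) are illustrative only:
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		...
 *		if (xsk_uses_need_wakeup(pool)) {
 *			if (my_fill_queue_is_empty(rq))
 *				xsk_set_rx_need_wakeup(pool);
 *			else
 *				xsk_clear_rx_need_wakeup(pool);
 *		}
 *		...
 *	}
 *
 * Userspace then only issues a wakeup syscall when it observes
 * XDP_RING_NEED_WAKEUP on the mapped ring, saving syscalls in the
 * common case.
 */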
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err || likely(!frags))
		goto out;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			return err;
		list_del(&pos->xskb_list_node);
	}

out:
	return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}

static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (rem == copied)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}
		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}
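/* Worked example (a sketch, assuming a 4096-byte rx frame size and a
 * 5000-byte multi-buffer packet): __xsk_rcv() below allocates
 * num_desc = (5000 - 1) / 4096 + 1 = 2 frames and posts two Rx
 * descriptors. The first carries 4096 bytes plus the XDP_PKT_CONTD
 * flag, the second carries the remaining 904 bytes with no flag,
 * which is how userspace detects the end of the packet.
 */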
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}
		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
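/* A hedged sketch of the driver-side pairing of the exported Tx
 * helpers around this point: xsk_tx_peek_desc()/xsk_tx_release() feed
 * descriptors into the NIC, and xsk_tx_completed() credits the
 * completion ring once DMA has finished. The driver-local name
 * my_xmit_one and the budget handling are illustrative only:
 *
 *	while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		my_xmit_one(nic, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 *	...
 *	// later, from the Tx completion path:
 *	xsk_tx_completed(pool, nb_completed);
 */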
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_cancel_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}
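/* The copy-mode Tx helpers below smuggle a descriptor count, not a
 * pointer, through skb_shinfo(skb)->destructor_arg. A minimal sketch
 * of the round-trip (the value 3 is illustrative):
 *
 *	skb_shinfo(skb)->destructor_arg = (void *)(long)3;
 *	n = (long)skb_shinfo(skb)->destructor_arg;	// n == 3
 *
 * The count records how many Tx descriptors the skb consumed, so that
 * completion (xsk_destruct_skb) and cancellation (xsk_consume_skb)
 * can credit the right number of completion-ring entries.
 */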
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EFAULT);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}
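/* Worked example for the frag loop above (a sketch, assuming 4 KiB
 * pages): a descriptor whose buffer resolves to pool->addrs + 0x3080
 * gives offset = offset_in_page(buffer) = 0x80 and a starting page
 * index of addr >> PAGE_SHIFT = 3. A 6000-byte payload then becomes
 * two frags: 4096 - 0x80 = 3968 bytes from page 3 at offset 0x80,
 * and the remaining 2032 bytes from page 4 at offset 0.
 */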
static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err))
				goto free_err;
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EFAULT;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (err == -EAGAIN) {
		xsk_cq_cancel_locked(xs, 1);
	} else {
		xsk_set_destructor_arg(skb);
		xsk_drop_skb(skb);
		xskq_cons_release(xs->tx);
	}

	return ERR_PTR(err);
}

static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xsk_cq_reserve_addr_locked(xs, desc.addr))
			goto out;

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err == -EAGAIN)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			xs->skb = NULL;
			goto out;
		}

		sent_frame = true;
		xs->skb = NULL;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}
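/* xsk_no_wakeup() below suppresses driver wakeups when the socket has
 * opted in to preferred busy polling. A hedged userspace sketch of
 * that opt-in (the usec and budget values are illustrative):
 *
 *	int one = 1, usec = 20, budget = 64;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
 *
 * With these set (and a valid NAPI id recorded on the socket),
 * sendmsg()/recvmsg() drive the NAPI context via sk_busy_loop()
 * instead of waking the driver.
 */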
static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto skip_tx;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

skip_tx:
	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	rcu_read_unlock();
	return mask;
}
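/* Userspace counterpart of the wakeup logic in xsk_sendmsg()/xsk_poll()
 * above: with XDP_USE_NEED_WAKEUP, the Tx producer only needs a
 * syscall kick when the kernel has set XDP_RING_NEED_WAKEUP on the
 * ring. A hedged sketch (tx_ring_flags would point at the mapped Tx
 * ring's flags field; the name is illustrative):
 *
 *	if (*tx_ring_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * When the flag is clear, the driver is already processing the ring
 * and the syscall can be skipped entirely.
 */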
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}
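/* The smp_wmb()/WRITE_ONCE() publish in xsk_init_queue() above pairs
 * with a READ_ONCE()/smp_rmb() consume on the reader side (see
 * xsk_mmap() further down). A minimal sketch of the pattern, with
 * generic names:
 *
 *	// publisher				// consumer
 *	q->ring = ...;				q = READ_ONCE(*queue);
 *	smp_wmb();				smp_rmb();
 *	WRITE_ONCE(*queue, q);			use(q->ring);
 *
 * The barriers guarantee that once a reader observes the queue
 * pointer, it also observes the fully initialized ring behind it.
 */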
static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}
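/* Userspace view of xsk_bind() below: the socket is tied to one
 * netdev/queue pair through a struct sockaddr_xdp. A hedged sketch
 * (the interface name and queue id are illustrative):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
 *		err(1, "bind");
 *
 * Passing XDP_SHARED_UMEM instead makes sxdp_shared_umem_fd the fd of
 * an already-bound socket whose umem should be shared.
 */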
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If the underlying shared umem was created without a
			 * Tx ring, allocate the Tx descs array that the Tx
			 * batching API utilizes.
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(flags & XDP_USE_SG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
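/* xsk_setsockopt() below accepts both the v1 layout above and the
 * current struct xdp_umem_reg, distinguished purely by optlen. A
 * hedged userspace sketch of XDP_UMEM_REG (the sizes are illustrative;
 * the area must be page aligned):
 *
 *	void *area;
 *	struct xdp_umem_reg mr = {};
 *
 *	posix_memalign(&area, getpagesize(), 4096 * 2048);
 *	mr.addr = (__u64)(uintptr_t)area;
 *	mr.len = 4096 * 2048;
 *	mr.chunk_size = 4096;
 *	mr.headroom = 0;
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */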
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};
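/* xsk_getsockopt() below uses the caller-supplied optlen to pick
 * between the v1 layout above and the full struct xdp_statistics, so
 * old binaries keep working. A hedged userspace sketch of the
 * current-layout query:
 *
 *	struct xdp_statistics stats;
 *	socklen_t optlen = sizeof(stats);
 *
 *	if (!getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
 *		printf("rx_dropped: %llu\n",
 *		       (unsigned long long)stats.rx_dropped);
 */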
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
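/* Userspace counterpart of xsk_mmap() below: fetch the ring offsets
 * with XDP_MMAP_OFFSETS, then map each ring at its well-known page
 * offset. A hedged sketch for the Rx ring (the ring size of 2048 is
 * illustrative; error handling omitted):
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *	void *rx_map;
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	rx_map = mmap(NULL, off.rx.desc + 2048 * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      fd, XDP_PGOFF_RX_RING);
 *
 * The fill and completion rings use XDP_UMEM_PGOFF_FILL_RING and
 * XDP_UMEM_PGOFF_COMPLETION_RING the same way.
 */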
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
						 READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
						 READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family = PF_XDP,
	.owner = THIS_MODULE,
	.release = xsk_release,
	.bind = xsk_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = xsk_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = xsk_setsockopt,
	.getsockopt = xsk_getsockopt,
	.sendmsg = xsk_sendmsg,
	.recvmsg = xsk_recvmsg,
	.mmap = xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}
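/* Userspace entry point for xsk_create() above: an AF_XDP socket is a
 * raw socket with no protocol, and creation requires CAP_NET_RAW in
 * the owning user namespace. A minimal sketch:
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	if (fd < 0)
 *		err(1, "socket(AF_XDP)");	// EPERM without CAP_NET_RAW
 *
 * Any other socket type fails with ESOCKTNOSUPPORT and any nonzero
 * protocol with EPROTONOSUPPORT, matching the checks above.
 */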
static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);