// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_sock *xs;
	u32 nb_pkts;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, max_entries);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	if (!nb_pkts)
		goto out;

	xskq_cons_release_n(xs->tx, nb_pkts);
	__xskq_cons_release(xs->tx);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = 0; copied < len; i++) {
		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb))
			return skb;
	} else {
		u32 hr, tr, len;
		void *buffer;
		int err;

		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
		tr = dev->needed_tailroom;
		len = desc->len;

		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
		skb_put(skb, len);

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = xs->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
	skb->destructor = xsk_destruct_skb;

	return skb;
}

static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	int ret;

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_TX);

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = xsk_generic_xmit(sk);
	/* Reacquire the RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return xsk_xmit(sk);
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (unlikely(!xsk_is_bound(xs))) {
		rcu_read_unlock();
		return mask;
	}

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			xsk_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family = PF_XDP,
	.owner = THIS_MODULE,
	.release = xsk_release,
	.bind = xsk_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = xsk_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = xsk_setsockopt,
	.getsockopt = xsk_getsockopt,
	.sendmsg = xsk_sendmsg,
	.recvmsg = xsk_recvmsg,
	.mmap = xsk_mmap,
	.sendpage = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);
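
/* Illustrative sketch, not part of the kernel sources: how a userspace
 * application might drive the socket layer implemented above
 * (xsk_create(), xsk_setsockopt(), xsk_mmap(), xsk_bind()). Only UAPI
 * symbols from <linux/if_xdp.h> are used; the UMEM allocation, error
 * handling, and the fill/completion/Tx ring mmaps are omitted, and the
 * interface name, queue id, buffer sizes and the umem_area/umem_size
 * variables are assumptions made for the example.
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,	// page-aligned buffer
 *		.len = umem_size,
 *		.chunk_size = 4096,
 *		.headroom = 0,
 *	};
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *
 *	int ring_sz = 2048;	// must be a power of two, see xsk_init_queue()
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	void *rx_ring = mmap(NULL, off.rx.desc + ring_sz * sizeof(struct xdp_desc),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			     fd, XDP_PGOFF_RX_RING);
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),	// assumed interface
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */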