// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
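/* Illustrative userspace counterpart (a sketch, not part of this file):
 * with XDP_USE_NEED_WAKEUP, an application only issues a syscall when the
 * kernel has requested one via the mmap'ed ring flags word. ring_flags
 * below is assumed to point at a ring's flags field, located through the
 * XDP_MMAP_OFFSETS getsockopt (off.tx.flags / off.fr.flags).
 *
 *	// Tx side: kick the kernel only when it asked for a wakeup.
 *	if (*ring_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 *	// Rx/fill side: same test, driven via recvfrom() or poll().
 *	if (*ring_flags & XDP_RING_NEED_WAKEUP)
 *		recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);
 */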
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}
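/* A minimal sketch of the publish/consume pairing used by xsk_is_bound()
 * and bind(): the writer initializes all fields, orders them with
 * smp_wmb(), then publishes the state word; the reader tests the state,
 * orders with smp_rmb(), and only then dereferences the fields.
 *
 *	// publisher (bind())
 *	xs->dev = dev;
 *	xs->pool = pool;
 *	smp_wmb();
 *	WRITE_ONCE(xs->state, XSK_BOUND);
 *
 *	// consumer (any datapath entry point)
 *	if (READ_ONCE(xs->state) == XSK_BOUND) {
 *		smp_rmb();
 *		// xs->dev and xs->pool are now guaranteed visible
 *	}
 */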
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);
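/* A hedged sketch of how a zero-copy driver consumes this API from its Tx
 * napi poll. The hardware-ring helpers (hw_ring_has_room(), hw_ring_post())
 * are hypothetical; the xsk_* calls are the ones exported above and in
 * xdp_sock_drv.h.
 *
 *	struct xdp_desc desc;
 *
 *	while (hw_ring_has_room(ring) && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		hw_ring_post(ring, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 *
 *	// later, from the Tx completion path:
 *	xsk_tx_completed(pool, cleaned_frames);
 */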
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_sock *xs;
	u32 nb_pkts;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, max_entries);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	if (!nb_pkts)
		goto out;

	xskq_cons_release_n(xs->tx, nb_pkts);
	__xskq_cons_release(xs->tx);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
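/* The batched flavour amortizes the ring accesses; the peeked descriptors
 * land in pool->tx_descs. An illustrative driver loop follows; budget and
 * hw_ring_post() are assumptions, not kernel API.
 *
 *	u32 i, nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
 *
 *	for (i = 0; i < nb_pkts; i++) {
 *		struct xdp_desc *desc = &pool->tx_descs[i];
 *
 *		hw_ring_post(ring, xsk_buff_raw_get_dma(pool, desc->addr),
 *			     desc->len);
 *	}
 */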
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = 0; copied < len; i++) {
		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb))
			return skb;
	} else {
		u32 hr, tr, len;
		void *buffer;
		int err;

		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
		tr = dev->needed_tailroom;
		len = desc->len;

		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
		skb_put(skb, len);

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = xs->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
	skb->destructor = xsk_destruct_skb;

	return skb;
}
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return __xsk_sendmsg(sk);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}
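/* Userspace opts in to the busy-poll path checked by xsk_no_wakeup() with
 * plain SOL_SOCKET options; a sketch, with arbitrary example values:
 *
 *	int one = 1, usecs = 20, budget = 64;
 *
 *	setsockopt(xsk_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one));
 *	setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 *	setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
 *
 * After this, the recvfrom()/sendto() calls above spin the NAPI context
 * via sk_busy_loop() instead of triggering driver wakeups.
 */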
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
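/* From userspace, a poll()-driven event loop over an AF_XDP socket can
 * look like the sketch below; in copy mode the poll() call itself pushes
 * pending Tx descriptors, as noted above. drain_rx_ring() and
 * fill_tx_ring() are hypothetical application helpers.
 *
 *	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0) {
 *		if (pfd.revents & POLLIN)
 *			drain_rx_ring();
 *		if (pfd.revents & POLLOUT)
 *			fill_tx_ring();
 *	}
 */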
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
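/* The matching userspace call, sketched: bind to queue 0 of a device,
 * optionally hanging a second socket off the first one's umem. The
 * interface name is an example.
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 *	// Shared umem: no other flags may be set, per the checks above.
 *	sxdp.sxdp_flags = XDP_SHARED_UMEM;
 *	sxdp.sxdp_shared_umem_fd = xsk_fd;
 *	bind(second_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */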
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
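/* Userspace side of the options above, sketched; sizes are example
 * values. Ring sizes must be powers of two (see xsk_init_queue()) and
 * the umem area must be page aligned.
 *
 *	#define NUM_FRAMES 4096
 *	#define FRAME_SIZE 2048
 *
 *	void *area;
 *	int entries = 512;
 *	struct xdp_umem_reg mr = {};
 *
 *	posix_memalign(&area, getpagesize(), NUM_FRAMES * FRAME_SIZE);
 *	mr.addr = (unsigned long)area;
 *	mr.len = NUM_FRAMES * FRAME_SIZE;
 *	mr.chunk_size = FRAME_SIZE;
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 */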
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
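/* How userspace maps a ring with the offsets returned above, sketched for
 * the Rx ring; the fill/completion rings use off.fr/off.cr together with
 * the XDP_UMEM_PGOFF_* constants instead.
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	void *map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *			 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			 xsk_fd, XDP_PGOFF_RX_RING);
 *	__u32 *producer = map + off.rx.producer;
 *	__u32 *consumer = map + off.rx.consumer;
 *	struct xdp_desc *descs = map + off.rx.desc;
 */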
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}
static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);