1 /* 2 * Virtual network driver for conversing with remote driver backends. 3 * 4 * Copyright (c) 2002-2005, K A Fraser 5 * Copyright (c) 2005, XenSource Ltd 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License version 2 9 * as published by the Free Software Foundation; or, when distributed 10 * separately from the Linux kernel or incorporated into other 11 * software packages, subject to the following license: 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this source file (the "Software"), to deal in the Software without 15 * restriction, including without limitation the rights to use, copy, modify, 16 * merge, publish, distribute, sublicense, and/or sell copies of the Software, 17 * and to permit persons to whom the Software is furnished to do so, subject to 18 * the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 29 * IN THE SOFTWARE. 30 */ 31 32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33 34 #include <linux/module.h> 35 #include <linux/kernel.h> 36 #include <linux/netdevice.h> 37 #include <linux/etherdevice.h> 38 #include <linux/skbuff.h> 39 #include <linux/ethtool.h> 40 #include <linux/if_ether.h> 41 #include <net/tcp.h> 42 #include <linux/udp.h> 43 #include <linux/moduleparam.h> 44 #include <linux/mm.h> 45 #include <linux/slab.h> 46 #include <net/ip.h> 47 #include <linux/bpf.h> 48 #include <net/page_pool.h> 49 #include <linux/bpf_trace.h> 50 51 #include <xen/xen.h> 52 #include <xen/xenbus.h> 53 #include <xen/events.h> 54 #include <xen/page.h> 55 #include <xen/platform_pci.h> 56 #include <xen/grant_table.h> 57 58 #include <xen/interface/io/netif.h> 59 #include <xen/interface/memory.h> 60 #include <xen/interface/grant_table.h> 61 62 /* Module parameters */ 63 #define MAX_QUEUES_DEFAULT 8 64 static unsigned int xennet_max_queues; 65 module_param_named(max_queues, xennet_max_queues, uint, 0644); 66 MODULE_PARM_DESC(max_queues, 67 "Maximum number of queues per virtual interface"); 68 69 #define XENNET_TIMEOUT (5 * HZ) 70 71 static const struct ethtool_ops xennet_ethtool_ops; 72 73 struct netfront_cb { 74 int pull_to; 75 }; 76 77 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 78 79 #define RX_COPY_THRESHOLD 256 80 81 #define GRANT_INVALID_REF 0 82 83 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE) 84 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE) 85 86 /* Minimum number of Rx slots (includes slot for GSO metadata). */ 87 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) 88 89 /* Queue name is interface name with "-qNNN" appended */ 90 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) 91 92 /* IRQ name is queue name with "-tx" or "-rx" appended */ 93 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 94 95 static DECLARE_WAIT_QUEUE_HEAD(module_wq); 96 97 struct netfront_stats { 98 u64 packets; 99 u64 bytes; 100 struct u64_stats_sync syncp; 101 }; 102 103 struct netfront_info; 104 105 struct netfront_queue { 106 unsigned int id; /* Queue ID, 0-based */ 107 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ 108 struct netfront_info *info; 109 110 struct bpf_prog __rcu *xdp_prog; 111 112 struct napi_struct napi; 113 114 /* Split event channels support, tx_* == rx_* when using 115 * single event channel. 116 */ 117 unsigned int tx_evtchn, rx_evtchn; 118 unsigned int tx_irq, rx_irq; 119 /* Only used when split event channels support is enabled */ 120 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ 121 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ 122 123 spinlock_t tx_lock; 124 struct xen_netif_tx_front_ring tx; 125 int tx_ring_ref; 126 127 /* 128 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries 129 * are linked from tx_skb_freelist through tx_link. 130 */ 131 struct sk_buff *tx_skbs[NET_TX_RING_SIZE]; 132 unsigned short tx_link[NET_TX_RING_SIZE]; 133 #define TX_LINK_NONE 0xffff 134 #define TX_PENDING 0xfffe 135 grant_ref_t gref_tx_head; 136 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; 137 struct page *grant_tx_page[NET_TX_RING_SIZE]; 138 unsigned tx_skb_freelist; 139 unsigned int tx_pend_queue; 140 141 spinlock_t rx_lock ____cacheline_aligned_in_smp; 142 struct xen_netif_rx_front_ring rx; 143 int rx_ring_ref; 144 145 struct timer_list rx_refill_timer; 146 147 struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; 148 grant_ref_t gref_rx_head; 149 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; 150 151 unsigned int rx_rsp_unconsumed; 152 spinlock_t rx_cons_lock; 153 154 struct page_pool *page_pool; 155 struct xdp_rxq_info xdp_rxq; 156 }; 157 158 struct netfront_info { 159 struct list_head list; 160 struct net_device *netdev; 161 162 struct xenbus_device *xbdev; 163 164 /* Multi-queue support */ 165 struct netfront_queue *queues; 166 167 /* Statistics */ 168 struct netfront_stats __percpu *rx_stats; 169 struct netfront_stats __percpu *tx_stats; 170 171 /* XDP state */ 172 bool netback_has_xdp_headroom; 173 bool netfront_xdp_enabled; 174 175 /* Is device behaving sane? */ 176 bool broken; 177 178 atomic_t rx_gso_checksum_fixup; 179 }; 180 181 struct netfront_rx_info { 182 struct xen_netif_rx_response rx; 183 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; 184 }; 185 186 /* 187 * Access macros for acquiring freeing slots in tx_skbs[]. 188 */ 189 190 static void add_id_to_list(unsigned *head, unsigned short *list, 191 unsigned short id) 192 { 193 list[id] = *head; 194 *head = id; 195 } 196 197 static unsigned short get_id_from_list(unsigned *head, unsigned short *list) 198 { 199 unsigned int id = *head; 200 201 if (id != TX_LINK_NONE) { 202 *head = list[id]; 203 list[id] = TX_LINK_NONE; 204 } 205 return id; 206 } 207 208 static int xennet_rxidx(RING_IDX idx) 209 { 210 return idx & (NET_RX_RING_SIZE - 1); 211 } 212 213 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, 214 RING_IDX ri) 215 { 216 int i = xennet_rxidx(ri); 217 struct sk_buff *skb = queue->rx_skbs[i]; 218 queue->rx_skbs[i] = NULL; 219 return skb; 220 } 221 222 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, 223 RING_IDX ri) 224 { 225 int i = xennet_rxidx(ri); 226 grant_ref_t ref = queue->grant_rx_ref[i]; 227 queue->grant_rx_ref[i] = GRANT_INVALID_REF; 228 return ref; 229 } 230 231 #ifdef CONFIG_SYSFS 232 static const struct attribute_group xennet_dev_group; 233 #endif 234 235 static bool xennet_can_sg(struct net_device *dev) 236 { 237 return dev->features & NETIF_F_SG; 238 } 239 240 241 static void rx_refill_timeout(struct timer_list *t) 242 { 243 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer); 244 napi_schedule(&queue->napi); 245 } 246 247 static int netfront_tx_slot_available(struct netfront_queue *queue) 248 { 249 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < 250 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); 251 } 252 253 static void xennet_maybe_wake_tx(struct netfront_queue *queue) 254 { 255 struct net_device *dev = queue->info->netdev; 256 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); 257 258 if (unlikely(netif_tx_queue_stopped(dev_queue)) && 259 netfront_tx_slot_available(queue) && 260 likely(netif_running(dev))) 261 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); 262 } 263 264 265 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) 266 { 267 struct sk_buff *skb; 268 struct page *page; 269 270 skb = __netdev_alloc_skb(queue->info->netdev, 271 RX_COPY_THRESHOLD + NET_IP_ALIGN, 272 GFP_ATOMIC | __GFP_NOWARN); 273 if (unlikely(!skb)) 274 return NULL; 275 276 page = page_pool_dev_alloc_pages(queue->page_pool); 277 if (unlikely(!page)) { 278 kfree_skb(skb); 279 return NULL; 280 } 281 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); 282 283 /* Align ip header to a 16 bytes boundary */ 284 skb_reserve(skb, NET_IP_ALIGN); 285 skb->dev = queue->info->netdev; 286 287 return skb; 288 } 289 290 291 static void xennet_alloc_rx_buffers(struct netfront_queue *queue) 292 { 293 RING_IDX req_prod = queue->rx.req_prod_pvt; 294 int notify; 295 int err = 0; 296 297 if (unlikely(!netif_carrier_ok(queue->info->netdev))) 298 return; 299 300 for (req_prod = queue->rx.req_prod_pvt; 301 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; 302 req_prod++) { 303 struct sk_buff *skb; 304 unsigned short id; 305 grant_ref_t ref; 306 struct page *page; 307 struct xen_netif_rx_request *req; 308 309 skb = xennet_alloc_one_rx_buffer(queue); 310 if (!skb) { 311 err = -ENOMEM; 312 break; 313 } 314 315 id = xennet_rxidx(req_prod); 316 317 BUG_ON(queue->rx_skbs[id]); 318 queue->rx_skbs[id] = skb; 319 320 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); 321 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 322 queue->grant_rx_ref[id] = ref; 323 324 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 325 326 req = RING_GET_REQUEST(&queue->rx, req_prod); 327 gnttab_page_grant_foreign_access_ref_one(ref, 328 queue->info->xbdev->otherend_id, 329 page, 330 0); 331 req->id = id; 332 req->gref = ref; 333 } 334 335 queue->rx.req_prod_pvt = req_prod; 336 337 /* Try again later if there are not enough requests or skb allocation 338 * failed. 339 * Enough requests is quantified as the sum of newly created slots and 340 * the unconsumed slots at the backend. 341 */ 342 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || 343 unlikely(err)) { 344 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); 345 return; 346 } 347 348 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); 349 if (notify) 350 notify_remote_via_irq(queue->rx_irq); 351 } 352 353 static int xennet_open(struct net_device *dev) 354 { 355 struct netfront_info *np = netdev_priv(dev); 356 unsigned int num_queues = dev->real_num_tx_queues; 357 unsigned int i = 0; 358 struct netfront_queue *queue = NULL; 359 360 if (!np->queues || np->broken) 361 return -ENODEV; 362 363 for (i = 0; i < num_queues; ++i) { 364 queue = &np->queues[i]; 365 napi_enable(&queue->napi); 366 367 spin_lock_bh(&queue->rx_lock); 368 if (netif_carrier_ok(dev)) { 369 xennet_alloc_rx_buffers(queue); 370 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; 371 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) 372 napi_schedule(&queue->napi); 373 } 374 spin_unlock_bh(&queue->rx_lock); 375 } 376 377 netif_tx_start_all_queues(dev); 378 379 return 0; 380 } 381 382 static bool xennet_tx_buf_gc(struct netfront_queue *queue) 383 { 384 RING_IDX cons, prod; 385 unsigned short id; 386 struct sk_buff *skb; 387 bool more_to_do; 388 bool work_done = false; 389 const struct device *dev = &queue->info->netdev->dev; 390 391 BUG_ON(!netif_carrier_ok(queue->info->netdev)); 392 393 do { 394 prod = queue->tx.sring->rsp_prod; 395 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) { 396 dev_alert(dev, "Illegal number of responses %u\n", 397 prod - queue->tx.rsp_cons); 398 goto err; 399 } 400 rmb(); /* Ensure we see responses up to 'rp'. */ 401 402 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { 403 struct xen_netif_tx_response txrsp; 404 405 work_done = true; 406 407 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); 408 if (txrsp.status == XEN_NETIF_RSP_NULL) 409 continue; 410 411 id = txrsp.id; 412 if (id >= RING_SIZE(&queue->tx)) { 413 dev_alert(dev, 414 "Response has incorrect id (%u)\n", 415 id); 416 goto err; 417 } 418 if (queue->tx_link[id] != TX_PENDING) { 419 dev_alert(dev, 420 "Response for inactive request\n"); 421 goto err; 422 } 423 424 queue->tx_link[id] = TX_LINK_NONE; 425 skb = queue->tx_skbs[id]; 426 queue->tx_skbs[id] = NULL; 427 if (unlikely(!gnttab_end_foreign_access_ref( 428 queue->grant_tx_ref[id]))) { 429 dev_alert(dev, 430 "Grant still in use by backend domain\n"); 431 goto err; 432 } 433 gnttab_release_grant_reference( 434 &queue->gref_tx_head, queue->grant_tx_ref[id]); 435 queue->grant_tx_ref[id] = GRANT_INVALID_REF; 436 queue->grant_tx_page[id] = NULL; 437 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id); 438 dev_kfree_skb_irq(skb); 439 } 440 441 queue->tx.rsp_cons = prod; 442 443 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do); 444 } while (more_to_do); 445 446 xennet_maybe_wake_tx(queue); 447 448 return work_done; 449 450 err: 451 queue->info->broken = true; 452 dev_alert(dev, "Disabled for further use\n"); 453 454 return work_done; 455 } 456 457 struct xennet_gnttab_make_txreq { 458 struct netfront_queue *queue; 459 struct sk_buff *skb; 460 struct page *page; 461 struct xen_netif_tx_request *tx; /* Last request on ring page */ 462 struct xen_netif_tx_request tx_local; /* Last request local copy*/ 463 unsigned int size; 464 }; 465 466 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, 467 unsigned int len, void *data) 468 { 469 struct xennet_gnttab_make_txreq *info = data; 470 unsigned int id; 471 struct xen_netif_tx_request *tx; 472 grant_ref_t ref; 473 /* convenient aliases */ 474 struct page *page = info->page; 475 struct netfront_queue *queue = info->queue; 476 struct sk_buff *skb = info->skb; 477 478 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link); 479 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 480 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 481 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 482 483 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 484 gfn, GNTMAP_readonly); 485 486 queue->tx_skbs[id] = skb; 487 queue->grant_tx_page[id] = page; 488 queue->grant_tx_ref[id] = ref; 489 490 info->tx_local.id = id; 491 info->tx_local.gref = ref; 492 info->tx_local.offset = offset; 493 info->tx_local.size = len; 494 info->tx_local.flags = 0; 495 496 *tx = info->tx_local; 497 498 /* 499 * Put the request in the pending queue, it will be set to be pending 500 * when the producer index is about to be raised. 501 */ 502 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id); 503 504 info->tx = tx; 505 info->size += info->tx_local.size; 506 } 507 508 static struct xen_netif_tx_request *xennet_make_first_txreq( 509 struct xennet_gnttab_make_txreq *info, 510 unsigned int offset, unsigned int len) 511 { 512 info->size = 0; 513 514 gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info); 515 516 return info->tx; 517 } 518 519 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, 520 unsigned int len, void *data) 521 { 522 struct xennet_gnttab_make_txreq *info = data; 523 524 info->tx->flags |= XEN_NETTXF_more_data; 525 skb_get(info->skb); 526 xennet_tx_setup_grant(gfn, offset, len, data); 527 } 528 529 static void xennet_make_txreqs( 530 struct xennet_gnttab_make_txreq *info, 531 struct page *page, 532 unsigned int offset, unsigned int len) 533 { 534 /* Skip unused frames from start of page */ 535 page += offset >> PAGE_SHIFT; 536 offset &= ~PAGE_MASK; 537 538 while (len) { 539 info->page = page; 540 info->size = 0; 541 542 gnttab_foreach_grant_in_range(page, offset, len, 543 xennet_make_one_txreq, 544 info); 545 546 page++; 547 offset = 0; 548 len -= info->size; 549 } 550 } 551 552 /* 553 * Count how many ring slots are required to send this skb. Each frag 554 * might be a compound page. 555 */ 556 static int xennet_count_skb_slots(struct sk_buff *skb) 557 { 558 int i, frags = skb_shinfo(skb)->nr_frags; 559 int slots; 560 561 slots = gnttab_count_grant(offset_in_page(skb->data), 562 skb_headlen(skb)); 563 564 for (i = 0; i < frags; i++) { 565 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 566 unsigned long size = skb_frag_size(frag); 567 unsigned long offset = skb_frag_off(frag); 568 569 /* Skip unused frames from start of page */ 570 offset &= ~PAGE_MASK; 571 572 slots += gnttab_count_grant(offset, size); 573 } 574 575 return slots; 576 } 577 578 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, 579 struct net_device *sb_dev) 580 { 581 unsigned int num_queues = dev->real_num_tx_queues; 582 u32 hash; 583 u16 queue_idx; 584 585 /* First, check if there is only one queue */ 586 if (num_queues == 1) { 587 queue_idx = 0; 588 } else { 589 hash = skb_get_hash(skb); 590 queue_idx = hash % num_queues; 591 } 592 593 return queue_idx; 594 } 595 596 static void xennet_mark_tx_pending(struct netfront_queue *queue) 597 { 598 unsigned int i; 599 600 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) != 601 TX_LINK_NONE) 602 queue->tx_link[i] = TX_PENDING; 603 } 604 605 static int xennet_xdp_xmit_one(struct net_device *dev, 606 struct netfront_queue *queue, 607 struct xdp_frame *xdpf) 608 { 609 struct netfront_info *np = netdev_priv(dev); 610 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); 611 struct xennet_gnttab_make_txreq info = { 612 .queue = queue, 613 .skb = NULL, 614 .page = virt_to_page(xdpf->data), 615 }; 616 int notify; 617 618 xennet_make_first_txreq(&info, 619 offset_in_page(xdpf->data), 620 xdpf->len); 621 622 xennet_mark_tx_pending(queue); 623 624 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); 625 if (notify) 626 notify_remote_via_irq(queue->tx_irq); 627 628 u64_stats_update_begin(&tx_stats->syncp); 629 tx_stats->bytes += xdpf->len; 630 tx_stats->packets++; 631 u64_stats_update_end(&tx_stats->syncp); 632 633 xennet_tx_buf_gc(queue); 634 635 return 0; 636 } 637 638 static int xennet_xdp_xmit(struct net_device *dev, int n, 639 struct xdp_frame **frames, u32 flags) 640 { 641 unsigned int num_queues = dev->real_num_tx_queues; 642 struct netfront_info *np = netdev_priv(dev); 643 struct netfront_queue *queue = NULL; 644 unsigned long irq_flags; 645 int nxmit = 0; 646 int i; 647 648 if (unlikely(np->broken)) 649 return -ENODEV; 650 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 651 return -EINVAL; 652 653 queue = &np->queues[smp_processor_id() % num_queues]; 654 655 spin_lock_irqsave(&queue->tx_lock, irq_flags); 656 for (i = 0; i < n; i++) { 657 struct xdp_frame *xdpf = frames[i]; 658 659 if (!xdpf) 660 continue; 661 if (xennet_xdp_xmit_one(dev, queue, xdpf)) 662 break; 663 nxmit++; 664 } 665 spin_unlock_irqrestore(&queue->tx_lock, irq_flags); 666 667 return nxmit; 668 } 669 670 671 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) 672 673 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) 674 { 675 struct netfront_info *np = netdev_priv(dev); 676 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); 677 struct xen_netif_tx_request *first_tx; 678 unsigned int i; 679 int notify; 680 int slots; 681 struct page *page; 682 unsigned int offset; 683 unsigned int len; 684 unsigned long flags; 685 struct netfront_queue *queue = NULL; 686 struct xennet_gnttab_make_txreq info = { }; 687 unsigned int num_queues = dev->real_num_tx_queues; 688 u16 queue_index; 689 struct sk_buff *nskb; 690 691 /* Drop the packet if no queues are set up */ 692 if (num_queues < 1) 693 goto drop; 694 if (unlikely(np->broken)) 695 goto drop; 696 /* Determine which queue to transmit this SKB on */ 697 queue_index = skb_get_queue_mapping(skb); 698 queue = &np->queues[queue_index]; 699 700 /* If skb->len is too big for wire format, drop skb and alert 701 * user about misconfiguration. 702 */ 703 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { 704 net_alert_ratelimited( 705 "xennet: skb->len = %u, too big for wire format\n", 706 skb->len); 707 goto drop; 708 } 709 710 slots = xennet_count_skb_slots(skb); 711 if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) { 712 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", 713 slots, skb->len); 714 if (skb_linearize(skb)) 715 goto drop; 716 } 717 718 page = virt_to_page(skb->data); 719 offset = offset_in_page(skb->data); 720 721 /* The first req should be at least ETH_HLEN size or the packet will be 722 * dropped by netback. 723 */ 724 if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { 725 nskb = skb_copy(skb, GFP_ATOMIC); 726 if (!nskb) 727 goto drop; 728 dev_consume_skb_any(skb); 729 skb = nskb; 730 page = virt_to_page(skb->data); 731 offset = offset_in_page(skb->data); 732 } 733 734 len = skb_headlen(skb); 735 736 spin_lock_irqsave(&queue->tx_lock, flags); 737 738 if (unlikely(!netif_carrier_ok(dev) || 739 (slots > 1 && !xennet_can_sg(dev)) || 740 netif_needs_gso(skb, netif_skb_features(skb)))) { 741 spin_unlock_irqrestore(&queue->tx_lock, flags); 742 goto drop; 743 } 744 745 /* First request for the linear area. */ 746 info.queue = queue; 747 info.skb = skb; 748 info.page = page; 749 first_tx = xennet_make_first_txreq(&info, offset, len); 750 offset += info.tx_local.size; 751 if (offset == PAGE_SIZE) { 752 page++; 753 offset = 0; 754 } 755 len -= info.tx_local.size; 756 757 if (skb->ip_summed == CHECKSUM_PARTIAL) 758 /* local packet? */ 759 first_tx->flags |= XEN_NETTXF_csum_blank | 760 XEN_NETTXF_data_validated; 761 else if (skb->ip_summed == CHECKSUM_UNNECESSARY) 762 /* remote but checksummed. */ 763 first_tx->flags |= XEN_NETTXF_data_validated; 764 765 /* Optional extra info after the first request. */ 766 if (skb_shinfo(skb)->gso_size) { 767 struct xen_netif_extra_info *gso; 768 769 gso = (struct xen_netif_extra_info *) 770 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 771 772 first_tx->flags |= XEN_NETTXF_extra_info; 773 774 gso->u.gso.size = skb_shinfo(skb)->gso_size; 775 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? 776 XEN_NETIF_GSO_TYPE_TCPV6 : 777 XEN_NETIF_GSO_TYPE_TCPV4; 778 gso->u.gso.pad = 0; 779 gso->u.gso.features = 0; 780 781 gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 782 gso->flags = 0; 783 } 784 785 /* Requests for the rest of the linear area. */ 786 xennet_make_txreqs(&info, page, offset, len); 787 788 /* Requests for all the frags. */ 789 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 790 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 791 xennet_make_txreqs(&info, skb_frag_page(frag), 792 skb_frag_off(frag), 793 skb_frag_size(frag)); 794 } 795 796 /* First request has the packet length. */ 797 first_tx->size = skb->len; 798 799 /* timestamp packet in software */ 800 skb_tx_timestamp(skb); 801 802 xennet_mark_tx_pending(queue); 803 804 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); 805 if (notify) 806 notify_remote_via_irq(queue->tx_irq); 807 808 u64_stats_update_begin(&tx_stats->syncp); 809 tx_stats->bytes += skb->len; 810 tx_stats->packets++; 811 u64_stats_update_end(&tx_stats->syncp); 812 813 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 814 xennet_tx_buf_gc(queue); 815 816 if (!netfront_tx_slot_available(queue)) 817 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); 818 819 spin_unlock_irqrestore(&queue->tx_lock, flags); 820 821 return NETDEV_TX_OK; 822 823 drop: 824 dev->stats.tx_dropped++; 825 dev_kfree_skb_any(skb); 826 return NETDEV_TX_OK; 827 } 828 829 static int xennet_close(struct net_device *dev) 830 { 831 struct netfront_info *np = netdev_priv(dev); 832 unsigned int num_queues = dev->real_num_tx_queues; 833 unsigned int i; 834 struct netfront_queue *queue; 835 netif_tx_stop_all_queues(np->netdev); 836 for (i = 0; i < num_queues; ++i) { 837 queue = &np->queues[i]; 838 napi_disable(&queue->napi); 839 } 840 return 0; 841 } 842 843 static void xennet_destroy_queues(struct netfront_info *info) 844 { 845 unsigned int i; 846 847 for (i = 0; i < info->netdev->real_num_tx_queues; i++) { 848 struct netfront_queue *queue = &info->queues[i]; 849 850 if (netif_running(info->netdev)) 851 napi_disable(&queue->napi); 852 netif_napi_del(&queue->napi); 853 } 854 855 kfree(info->queues); 856 info->queues = NULL; 857 } 858 859 static void xennet_uninit(struct net_device *dev) 860 { 861 struct netfront_info *np = netdev_priv(dev); 862 xennet_destroy_queues(np); 863 } 864 865 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) 866 { 867 unsigned long flags; 868 869 spin_lock_irqsave(&queue->rx_cons_lock, flags); 870 queue->rx.rsp_cons = val; 871 queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); 872 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); 873 } 874 875 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, 876 grant_ref_t ref) 877 { 878 int new = xennet_rxidx(queue->rx.req_prod_pvt); 879 880 BUG_ON(queue->rx_skbs[new]); 881 queue->rx_skbs[new] = skb; 882 queue->grant_rx_ref[new] = ref; 883 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; 884 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; 885 queue->rx.req_prod_pvt++; 886 } 887 888 static int xennet_get_extras(struct netfront_queue *queue, 889 struct xen_netif_extra_info *extras, 890 RING_IDX rp) 891 892 { 893 struct xen_netif_extra_info extra; 894 struct device *dev = &queue->info->netdev->dev; 895 RING_IDX cons = queue->rx.rsp_cons; 896 int err = 0; 897 898 do { 899 struct sk_buff *skb; 900 grant_ref_t ref; 901 902 if (unlikely(cons + 1 == rp)) { 903 if (net_ratelimit()) 904 dev_warn(dev, "Missing extra info\n"); 905 err = -EBADR; 906 break; 907 } 908 909 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra); 910 911 if (unlikely(!extra.type || 912 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 913 if (net_ratelimit()) 914 dev_warn(dev, "Invalid extra type: %d\n", 915 extra.type); 916 err = -EINVAL; 917 } else { 918 extras[extra.type - 1] = extra; 919 } 920 921 skb = xennet_get_rx_skb(queue, cons); 922 ref = xennet_get_rx_ref(queue, cons); 923 xennet_move_rx_slot(queue, skb, ref); 924 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); 925 926 xennet_set_rx_rsp_cons(queue, cons); 927 return err; 928 } 929 930 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, 931 struct xen_netif_rx_response *rx, struct bpf_prog *prog, 932 struct xdp_buff *xdp, bool *need_xdp_flush) 933 { 934 struct xdp_frame *xdpf; 935 u32 len = rx->status; 936 u32 act; 937 int err; 938 939 xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, 940 &queue->xdp_rxq); 941 xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM, 942 len, false); 943 944 act = bpf_prog_run_xdp(prog, xdp); 945 switch (act) { 946 case XDP_TX: 947 get_page(pdata); 948 xdpf = xdp_convert_buff_to_frame(xdp); 949 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); 950 if (unlikely(!err)) 951 xdp_return_frame_rx_napi(xdpf); 952 else if (unlikely(err < 0)) 953 trace_xdp_exception(queue->info->netdev, prog, act); 954 break; 955 case XDP_REDIRECT: 956 get_page(pdata); 957 err = xdp_do_redirect(queue->info->netdev, xdp, prog); 958 *need_xdp_flush = true; 959 if (unlikely(err)) 960 trace_xdp_exception(queue->info->netdev, prog, act); 961 break; 962 case XDP_PASS: 963 case XDP_DROP: 964 break; 965 966 case XDP_ABORTED: 967 trace_xdp_exception(queue->info->netdev, prog, act); 968 break; 969 970 default: 971 bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act); 972 } 973 974 return act; 975 } 976 977 static int xennet_get_responses(struct netfront_queue *queue, 978 struct netfront_rx_info *rinfo, RING_IDX rp, 979 struct sk_buff_head *list, 980 bool *need_xdp_flush) 981 { 982 struct xen_netif_rx_response *rx = &rinfo->rx, rx_local; 983 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); 984 RING_IDX cons = queue->rx.rsp_cons; 985 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); 986 struct xen_netif_extra_info *extras = rinfo->extras; 987 grant_ref_t ref = xennet_get_rx_ref(queue, cons); 988 struct device *dev = &queue->info->netdev->dev; 989 struct bpf_prog *xdp_prog; 990 struct xdp_buff xdp; 991 int slots = 1; 992 int err = 0; 993 u32 verdict; 994 995 if (rx->flags & XEN_NETRXF_extra_info) { 996 err = xennet_get_extras(queue, extras, rp); 997 if (!err) { 998 if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) { 999 struct xen_netif_extra_info *xdp; 1000 1001 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1]; 1002 rx->offset = xdp->u.xdp.headroom; 1003 } 1004 } 1005 cons = queue->rx.rsp_cons; 1006 } 1007 1008 for (;;) { 1009 if (unlikely(rx->status < 0 || 1010 rx->offset + rx->status > XEN_PAGE_SIZE)) { 1011 if (net_ratelimit()) 1012 dev_warn(dev, "rx->offset: %u, size: %d\n", 1013 rx->offset, rx->status); 1014 xennet_move_rx_slot(queue, skb, ref); 1015 err = -EINVAL; 1016 goto next; 1017 } 1018 1019 /* 1020 * This definitely indicates a bug, either in this driver or in 1021 * the backend driver. In future this should flag the bad 1022 * situation to the system controller to reboot the backend. 1023 */ 1024 if (ref == GRANT_INVALID_REF) { 1025 if (net_ratelimit()) 1026 dev_warn(dev, "Bad rx response id %d.\n", 1027 rx->id); 1028 err = -EINVAL; 1029 goto next; 1030 } 1031 1032 if (!gnttab_end_foreign_access_ref(ref)) { 1033 dev_alert(dev, 1034 "Grant still in use by backend domain\n"); 1035 queue->info->broken = true; 1036 dev_alert(dev, "Disabled for further use\n"); 1037 return -EINVAL; 1038 } 1039 1040 gnttab_release_grant_reference(&queue->gref_rx_head, ref); 1041 1042 rcu_read_lock(); 1043 xdp_prog = rcu_dereference(queue->xdp_prog); 1044 if (xdp_prog) { 1045 if (!(rx->flags & XEN_NETRXF_more_data)) { 1046 /* currently only a single page contains data */ 1047 verdict = xennet_run_xdp(queue, 1048 skb_frag_page(&skb_shinfo(skb)->frags[0]), 1049 rx, xdp_prog, &xdp, need_xdp_flush); 1050 if (verdict != XDP_PASS) 1051 err = -EINVAL; 1052 } else { 1053 /* drop the frame */ 1054 err = -EINVAL; 1055 } 1056 } 1057 rcu_read_unlock(); 1058 next: 1059 __skb_queue_tail(list, skb); 1060 if (!(rx->flags & XEN_NETRXF_more_data)) 1061 break; 1062 1063 if (cons + slots == rp) { 1064 if (net_ratelimit()) 1065 dev_warn(dev, "Need more slots\n"); 1066 err = -ENOENT; 1067 break; 1068 } 1069 1070 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local); 1071 rx = &rx_local; 1072 skb = xennet_get_rx_skb(queue, cons + slots); 1073 ref = xennet_get_rx_ref(queue, cons + slots); 1074 slots++; 1075 } 1076 1077 if (unlikely(slots > max)) { 1078 if (net_ratelimit()) 1079 dev_warn(dev, "Too many slots\n"); 1080 err = -E2BIG; 1081 } 1082 1083 if (unlikely(err)) 1084 xennet_set_rx_rsp_cons(queue, cons + slots); 1085 1086 return err; 1087 } 1088 1089 static int xennet_set_skb_gso(struct sk_buff *skb, 1090 struct xen_netif_extra_info *gso) 1091 { 1092 if (!gso->u.gso.size) { 1093 if (net_ratelimit()) 1094 pr_warn("GSO size must not be zero\n"); 1095 return -EINVAL; 1096 } 1097 1098 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && 1099 gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { 1100 if (net_ratelimit()) 1101 pr_warn("Bad GSO type %d\n", gso->u.gso.type); 1102 return -EINVAL; 1103 } 1104 1105 skb_shinfo(skb)->gso_size = gso->u.gso.size; 1106 skb_shinfo(skb)->gso_type = 1107 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ? 1108 SKB_GSO_TCPV4 : 1109 SKB_GSO_TCPV6; 1110 1111 /* Header must be checked, and gso_segs computed. */ 1112 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 1113 skb_shinfo(skb)->gso_segs = 0; 1114 1115 return 0; 1116 } 1117 1118 static int xennet_fill_frags(struct netfront_queue *queue, 1119 struct sk_buff *skb, 1120 struct sk_buff_head *list) 1121 { 1122 RING_IDX cons = queue->rx.rsp_cons; 1123 struct sk_buff *nskb; 1124 1125 while ((nskb = __skb_dequeue(list))) { 1126 struct xen_netif_rx_response rx; 1127 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 1128 1129 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx); 1130 1131 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { 1132 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 1133 1134 BUG_ON(pull_to < skb_headlen(skb)); 1135 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 1136 } 1137 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { 1138 xennet_set_rx_rsp_cons(queue, 1139 ++cons + skb_queue_len(list)); 1140 kfree_skb(nskb); 1141 return -ENOENT; 1142 } 1143 1144 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1145 skb_frag_page(nfrag), 1146 rx.offset, rx.status, PAGE_SIZE); 1147 1148 skb_shinfo(nskb)->nr_frags = 0; 1149 kfree_skb(nskb); 1150 } 1151 1152 xennet_set_rx_rsp_cons(queue, cons); 1153 1154 return 0; 1155 } 1156 1157 static int checksum_setup(struct net_device *dev, struct sk_buff *skb) 1158 { 1159 bool recalculate_partial_csum = false; 1160 1161 /* 1162 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy 1163 * peers can fail to set NETRXF_csum_blank when sending a GSO 1164 * frame. In this case force the SKB to CHECKSUM_PARTIAL and 1165 * recalculate the partial checksum. 1166 */ 1167 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 1168 struct netfront_info *np = netdev_priv(dev); 1169 atomic_inc(&np->rx_gso_checksum_fixup); 1170 skb->ip_summed = CHECKSUM_PARTIAL; 1171 recalculate_partial_csum = true; 1172 } 1173 1174 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ 1175 if (skb->ip_summed != CHECKSUM_PARTIAL) 1176 return 0; 1177 1178 return skb_checksum_setup(skb, recalculate_partial_csum); 1179 } 1180 1181 static int handle_incoming_queue(struct netfront_queue *queue, 1182 struct sk_buff_head *rxq) 1183 { 1184 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); 1185 int packets_dropped = 0; 1186 struct sk_buff *skb; 1187 1188 while ((skb = __skb_dequeue(rxq)) != NULL) { 1189 int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 1190 1191 if (pull_to > skb_headlen(skb)) 1192 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 1193 1194 /* Ethernet work: Delayed to here as it peeks the header. */ 1195 skb->protocol = eth_type_trans(skb, queue->info->netdev); 1196 skb_reset_network_header(skb); 1197 1198 if (checksum_setup(queue->info->netdev, skb)) { 1199 kfree_skb(skb); 1200 packets_dropped++; 1201 queue->info->netdev->stats.rx_errors++; 1202 continue; 1203 } 1204 1205 u64_stats_update_begin(&rx_stats->syncp); 1206 rx_stats->packets++; 1207 rx_stats->bytes += skb->len; 1208 u64_stats_update_end(&rx_stats->syncp); 1209 1210 /* Pass it up. */ 1211 napi_gro_receive(&queue->napi, skb); 1212 } 1213 1214 return packets_dropped; 1215 } 1216 1217 static int xennet_poll(struct napi_struct *napi, int budget) 1218 { 1219 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); 1220 struct net_device *dev = queue->info->netdev; 1221 struct sk_buff *skb; 1222 struct netfront_rx_info rinfo; 1223 struct xen_netif_rx_response *rx = &rinfo.rx; 1224 struct xen_netif_extra_info *extras = rinfo.extras; 1225 RING_IDX i, rp; 1226 int work_done; 1227 struct sk_buff_head rxq; 1228 struct sk_buff_head errq; 1229 struct sk_buff_head tmpq; 1230 int err; 1231 bool need_xdp_flush = false; 1232 1233 spin_lock(&queue->rx_lock); 1234 1235 skb_queue_head_init(&rxq); 1236 skb_queue_head_init(&errq); 1237 skb_queue_head_init(&tmpq); 1238 1239 rp = queue->rx.sring->rsp_prod; 1240 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) { 1241 dev_alert(&dev->dev, "Illegal number of responses %u\n", 1242 rp - queue->rx.rsp_cons); 1243 queue->info->broken = true; 1244 spin_unlock(&queue->rx_lock); 1245 return 0; 1246 } 1247 rmb(); /* Ensure we see queued responses up to 'rp'. */ 1248 1249 i = queue->rx.rsp_cons; 1250 work_done = 0; 1251 while ((i != rp) && (work_done < budget)) { 1252 RING_COPY_RESPONSE(&queue->rx, i, rx); 1253 memset(extras, 0, sizeof(rinfo.extras)); 1254 1255 err = xennet_get_responses(queue, &rinfo, rp, &tmpq, 1256 &need_xdp_flush); 1257 1258 if (unlikely(err)) { 1259 if (queue->info->broken) { 1260 spin_unlock(&queue->rx_lock); 1261 return 0; 1262 } 1263 err: 1264 while ((skb = __skb_dequeue(&tmpq))) 1265 __skb_queue_tail(&errq, skb); 1266 dev->stats.rx_errors++; 1267 i = queue->rx.rsp_cons; 1268 continue; 1269 } 1270 1271 skb = __skb_dequeue(&tmpq); 1272 1273 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 1274 struct xen_netif_extra_info *gso; 1275 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1276 1277 if (unlikely(xennet_set_skb_gso(skb, gso))) { 1278 __skb_queue_head(&tmpq, skb); 1279 xennet_set_rx_rsp_cons(queue, 1280 queue->rx.rsp_cons + 1281 skb_queue_len(&tmpq)); 1282 goto err; 1283 } 1284 } 1285 1286 NETFRONT_SKB_CB(skb)->pull_to = rx->status; 1287 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) 1288 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; 1289 1290 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset); 1291 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); 1292 skb->data_len = rx->status; 1293 skb->len += rx->status; 1294 1295 if (unlikely(xennet_fill_frags(queue, skb, &tmpq))) 1296 goto err; 1297 1298 if (rx->flags & XEN_NETRXF_csum_blank) 1299 skb->ip_summed = CHECKSUM_PARTIAL; 1300 else if (rx->flags & XEN_NETRXF_data_validated) 1301 skb->ip_summed = CHECKSUM_UNNECESSARY; 1302 1303 __skb_queue_tail(&rxq, skb); 1304 1305 i = queue->rx.rsp_cons + 1; 1306 xennet_set_rx_rsp_cons(queue, i); 1307 work_done++; 1308 } 1309 if (need_xdp_flush) 1310 xdp_do_flush(); 1311 1312 __skb_queue_purge(&errq); 1313 1314 work_done -= handle_incoming_queue(queue, &rxq); 1315 1316 xennet_alloc_rx_buffers(queue); 1317 1318 if (work_done < budget) { 1319 int more_to_do = 0; 1320 1321 napi_complete_done(napi, work_done); 1322 1323 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); 1324 if (more_to_do) 1325 napi_schedule(napi); 1326 } 1327 1328 spin_unlock(&queue->rx_lock); 1329 1330 return work_done; 1331 } 1332 1333 static int xennet_change_mtu(struct net_device *dev, int mtu) 1334 { 1335 int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; 1336 1337 if (mtu > max) 1338 return -EINVAL; 1339 dev->mtu = mtu; 1340 return 0; 1341 } 1342 1343 static void xennet_get_stats64(struct net_device *dev, 1344 struct rtnl_link_stats64 *tot) 1345 { 1346 struct netfront_info *np = netdev_priv(dev); 1347 int cpu; 1348 1349 for_each_possible_cpu(cpu) { 1350 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); 1351 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); 1352 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1353 unsigned int start; 1354 1355 do { 1356 start = u64_stats_fetch_begin_irq(&tx_stats->syncp); 1357 tx_packets = tx_stats->packets; 1358 tx_bytes = tx_stats->bytes; 1359 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); 1360 1361 do { 1362 start = u64_stats_fetch_begin_irq(&rx_stats->syncp); 1363 rx_packets = rx_stats->packets; 1364 rx_bytes = rx_stats->bytes; 1365 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); 1366 1367 tot->rx_packets += rx_packets; 1368 tot->tx_packets += tx_packets; 1369 tot->rx_bytes += rx_bytes; 1370 tot->tx_bytes += tx_bytes; 1371 } 1372 1373 tot->rx_errors = dev->stats.rx_errors; 1374 tot->tx_dropped = dev->stats.tx_dropped; 1375 } 1376 1377 static void xennet_release_tx_bufs(struct netfront_queue *queue) 1378 { 1379 struct sk_buff *skb; 1380 int i; 1381 1382 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1383 /* Skip over entries which are actually freelist references */ 1384 if (!queue->tx_skbs[i]) 1385 continue; 1386 1387 skb = queue->tx_skbs[i]; 1388 queue->tx_skbs[i] = NULL; 1389 get_page(queue->grant_tx_page[i]); 1390 gnttab_end_foreign_access(queue->grant_tx_ref[i], 1391 (unsigned long)page_address(queue->grant_tx_page[i])); 1392 queue->grant_tx_page[i] = NULL; 1393 queue->grant_tx_ref[i] = GRANT_INVALID_REF; 1394 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i); 1395 dev_kfree_skb_irq(skb); 1396 } 1397 } 1398 1399 static void xennet_release_rx_bufs(struct netfront_queue *queue) 1400 { 1401 int id, ref; 1402 1403 spin_lock_bh(&queue->rx_lock); 1404 1405 for (id = 0; id < NET_RX_RING_SIZE; id++) { 1406 struct sk_buff *skb; 1407 struct page *page; 1408 1409 skb = queue->rx_skbs[id]; 1410 if (!skb) 1411 continue; 1412 1413 ref = queue->grant_rx_ref[id]; 1414 if (ref == GRANT_INVALID_REF) 1415 continue; 1416 1417 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 1418 1419 /* gnttab_end_foreign_access() needs a page ref until 1420 * foreign access is ended (which may be deferred). 1421 */ 1422 get_page(page); 1423 gnttab_end_foreign_access(ref, 1424 (unsigned long)page_address(page)); 1425 queue->grant_rx_ref[id] = GRANT_INVALID_REF; 1426 1427 kfree_skb(skb); 1428 } 1429 1430 spin_unlock_bh(&queue->rx_lock); 1431 } 1432 1433 static netdev_features_t xennet_fix_features(struct net_device *dev, 1434 netdev_features_t features) 1435 { 1436 struct netfront_info *np = netdev_priv(dev); 1437 1438 if (features & NETIF_F_SG && 1439 !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0)) 1440 features &= ~NETIF_F_SG; 1441 1442 if (features & NETIF_F_IPV6_CSUM && 1443 !xenbus_read_unsigned(np->xbdev->otherend, 1444 "feature-ipv6-csum-offload", 0)) 1445 features &= ~NETIF_F_IPV6_CSUM; 1446 1447 if (features & NETIF_F_TSO && 1448 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0)) 1449 features &= ~NETIF_F_TSO; 1450 1451 if (features & NETIF_F_TSO6 && 1452 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0)) 1453 features &= ~NETIF_F_TSO6; 1454 1455 return features; 1456 } 1457 1458 static int xennet_set_features(struct net_device *dev, 1459 netdev_features_t features) 1460 { 1461 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { 1462 netdev_info(dev, "Reducing MTU because no SG offload"); 1463 dev->mtu = ETH_DATA_LEN; 1464 } 1465 1466 return 0; 1467 } 1468 1469 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) 1470 { 1471 unsigned long flags; 1472 1473 if (unlikely(queue->info->broken)) 1474 return false; 1475 1476 spin_lock_irqsave(&queue->tx_lock, flags); 1477 if (xennet_tx_buf_gc(queue)) 1478 *eoi = 0; 1479 spin_unlock_irqrestore(&queue->tx_lock, flags); 1480 1481 return true; 1482 } 1483 1484 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) 1485 { 1486 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 1487 1488 if (likely(xennet_handle_tx(dev_id, &eoiflag))) 1489 xen_irq_lateeoi(irq, eoiflag); 1490 1491 return IRQ_HANDLED; 1492 } 1493 1494 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) 1495 { 1496 unsigned int work_queued; 1497 unsigned long flags; 1498 1499 if (unlikely(queue->info->broken)) 1500 return false; 1501 1502 spin_lock_irqsave(&queue->rx_cons_lock, flags); 1503 work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); 1504 if (work_queued > queue->rx_rsp_unconsumed) { 1505 queue->rx_rsp_unconsumed = work_queued; 1506 *eoi = 0; 1507 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { 1508 const struct device *dev = &queue->info->netdev->dev; 1509 1510 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); 1511 dev_alert(dev, "RX producer index going backwards\n"); 1512 dev_alert(dev, "Disabled for further use\n"); 1513 queue->info->broken = true; 1514 return false; 1515 } 1516 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); 1517 1518 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) 1519 napi_schedule(&queue->napi); 1520 1521 return true; 1522 } 1523 1524 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) 1525 { 1526 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 1527 1528 if (likely(xennet_handle_rx(dev_id, &eoiflag))) 1529 xen_irq_lateeoi(irq, eoiflag); 1530 1531 return IRQ_HANDLED; 1532 } 1533 1534 static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1535 { 1536 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 1537 1538 if (xennet_handle_tx(dev_id, &eoiflag) && 1539 xennet_handle_rx(dev_id, &eoiflag)) 1540 xen_irq_lateeoi(irq, eoiflag); 1541 1542 return IRQ_HANDLED; 1543 } 1544 1545 #ifdef CONFIG_NET_POLL_CONTROLLER 1546 static void xennet_poll_controller(struct net_device *dev) 1547 { 1548 /* Poll each queue */ 1549 struct netfront_info *info = netdev_priv(dev); 1550 unsigned int num_queues = dev->real_num_tx_queues; 1551 unsigned int i; 1552 1553 if (info->broken) 1554 return; 1555 1556 for (i = 0; i < num_queues; ++i) 1557 xennet_interrupt(0, &info->queues[i]); 1558 } 1559 #endif 1560 1561 #define NETBACK_XDP_HEADROOM_DISABLE 0 1562 #define NETBACK_XDP_HEADROOM_ENABLE 1 1563 1564 static int talk_to_netback_xdp(struct netfront_info *np, int xdp) 1565 { 1566 int err; 1567 unsigned short headroom; 1568 1569 headroom = xdp ? XDP_PACKET_HEADROOM : 0; 1570 err = xenbus_printf(XBT_NIL, np->xbdev->nodename, 1571 "xdp-headroom", "%hu", 1572 headroom); 1573 if (err) 1574 pr_warn("Error writing xdp-headroom\n"); 1575 1576 return err; 1577 } 1578 1579 static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1580 struct netlink_ext_ack *extack) 1581 { 1582 unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM; 1583 struct netfront_info *np = netdev_priv(dev); 1584 struct bpf_prog *old_prog; 1585 unsigned int i, err; 1586 1587 if (dev->mtu > max_mtu) { 1588 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu); 1589 return -EINVAL; 1590 } 1591 1592 if (!np->netback_has_xdp_headroom) 1593 return 0; 1594 1595 xenbus_switch_state(np->xbdev, XenbusStateReconfiguring); 1596 1597 err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE : 1598 NETBACK_XDP_HEADROOM_DISABLE); 1599 if (err) 1600 return err; 1601 1602 /* avoid the race with XDP headroom adjustment */ 1603 wait_event(module_wq, 1604 xenbus_read_driver_state(np->xbdev->otherend) == 1605 XenbusStateReconfigured); 1606 np->netfront_xdp_enabled = true; 1607 1608 old_prog = rtnl_dereference(np->queues[0].xdp_prog); 1609 1610 if (prog) 1611 bpf_prog_add(prog, dev->real_num_tx_queues); 1612 1613 for (i = 0; i < dev->real_num_tx_queues; ++i) 1614 rcu_assign_pointer(np->queues[i].xdp_prog, prog); 1615 1616 if (old_prog) 1617 for (i = 0; i < dev->real_num_tx_queues; ++i) 1618 bpf_prog_put(old_prog); 1619 1620 xenbus_switch_state(np->xbdev, XenbusStateConnected); 1621 1622 return 0; 1623 } 1624 1625 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1626 { 1627 struct netfront_info *np = netdev_priv(dev); 1628 1629 if (np->broken) 1630 return -ENODEV; 1631 1632 switch (xdp->command) { 1633 case XDP_SETUP_PROG: 1634 return xennet_xdp_set(dev, xdp->prog, xdp->extack); 1635 default: 1636 return -EINVAL; 1637 } 1638 } 1639 1640 static const struct net_device_ops xennet_netdev_ops = { 1641 .ndo_uninit = xennet_uninit, 1642 .ndo_open = xennet_open, 1643 .ndo_stop = xennet_close, 1644 .ndo_start_xmit = xennet_start_xmit, 1645 .ndo_change_mtu = xennet_change_mtu, 1646 .ndo_get_stats64 = xennet_get_stats64, 1647 .ndo_set_mac_address = eth_mac_addr, 1648 .ndo_validate_addr = eth_validate_addr, 1649 .ndo_fix_features = xennet_fix_features, 1650 .ndo_set_features = xennet_set_features, 1651 .ndo_select_queue = xennet_select_queue, 1652 .ndo_bpf = xennet_xdp, 1653 .ndo_xdp_xmit = xennet_xdp_xmit, 1654 #ifdef CONFIG_NET_POLL_CONTROLLER 1655 .ndo_poll_controller = xennet_poll_controller, 1656 #endif 1657 }; 1658 1659 static void xennet_free_netdev(struct net_device *netdev) 1660 { 1661 struct netfront_info *np = netdev_priv(netdev); 1662 1663 free_percpu(np->rx_stats); 1664 free_percpu(np->tx_stats); 1665 free_netdev(netdev); 1666 } 1667 1668 static struct net_device *xennet_create_dev(struct xenbus_device *dev) 1669 { 1670 int err; 1671 struct net_device *netdev; 1672 struct netfront_info *np; 1673 1674 netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); 1675 if (!netdev) 1676 return ERR_PTR(-ENOMEM); 1677 1678 np = netdev_priv(netdev); 1679 np->xbdev = dev; 1680 1681 np->queues = NULL; 1682 1683 err = -ENOMEM; 1684 np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1685 if (np->rx_stats == NULL) 1686 goto exit; 1687 np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1688 if (np->tx_stats == NULL) 1689 goto exit; 1690 1691 netdev->netdev_ops = &xennet_netdev_ops; 1692 1693 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 1694 NETIF_F_GSO_ROBUST; 1695 netdev->hw_features = NETIF_F_SG | 1696 NETIF_F_IPV6_CSUM | 1697 NETIF_F_TSO | NETIF_F_TSO6; 1698 1699 /* 1700 * Assume that all hw features are available for now. This set 1701 * will be adjusted by the call to netdev_update_features() in 1702 * xennet_connect() which is the earliest point where we can 1703 * negotiate with the backend regarding supported features. 1704 */ 1705 netdev->features |= netdev->hw_features; 1706 1707 netdev->ethtool_ops = &xennet_ethtool_ops; 1708 netdev->min_mtu = ETH_MIN_MTU; 1709 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 1710 SET_NETDEV_DEV(netdev, &dev->dev); 1711 1712 np->netdev = netdev; 1713 np->netfront_xdp_enabled = false; 1714 1715 netif_carrier_off(netdev); 1716 1717 do { 1718 xenbus_switch_state(dev, XenbusStateInitialising); 1719 err = wait_event_timeout(module_wq, 1720 xenbus_read_driver_state(dev->otherend) != 1721 XenbusStateClosed && 1722 xenbus_read_driver_state(dev->otherend) != 1723 XenbusStateUnknown, XENNET_TIMEOUT); 1724 } while (!err); 1725 1726 return netdev; 1727 1728 exit: 1729 xennet_free_netdev(netdev); 1730 return ERR_PTR(err); 1731 } 1732 1733 /* 1734 * Entry point to this code when a new device is created. Allocate the basic 1735 * structures and the ring buffers for communication with the backend, and 1736 * inform the backend of the appropriate details for those. 1737 */ 1738 static int netfront_probe(struct xenbus_device *dev, 1739 const struct xenbus_device_id *id) 1740 { 1741 int err; 1742 struct net_device *netdev; 1743 struct netfront_info *info; 1744 1745 netdev = xennet_create_dev(dev); 1746 if (IS_ERR(netdev)) { 1747 err = PTR_ERR(netdev); 1748 xenbus_dev_fatal(dev, err, "creating netdev"); 1749 return err; 1750 } 1751 1752 info = netdev_priv(netdev); 1753 dev_set_drvdata(&dev->dev, info); 1754 #ifdef CONFIG_SYSFS 1755 info->netdev->sysfs_groups[0] = &xennet_dev_group; 1756 #endif 1757 1758 return 0; 1759 } 1760 1761 static void xennet_end_access(int ref, void *page) 1762 { 1763 /* This frees the page as a side-effect */ 1764 if (ref != GRANT_INVALID_REF) 1765 gnttab_end_foreign_access(ref, (unsigned long)page); 1766 } 1767 1768 static void xennet_disconnect_backend(struct netfront_info *info) 1769 { 1770 unsigned int i = 0; 1771 unsigned int num_queues = info->netdev->real_num_tx_queues; 1772 1773 netif_carrier_off(info->netdev); 1774 1775 for (i = 0; i < num_queues && info->queues; ++i) { 1776 struct netfront_queue *queue = &info->queues[i]; 1777 1778 del_timer_sync(&queue->rx_refill_timer); 1779 1780 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1781 unbind_from_irqhandler(queue->tx_irq, queue); 1782 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 1783 unbind_from_irqhandler(queue->tx_irq, queue); 1784 unbind_from_irqhandler(queue->rx_irq, queue); 1785 } 1786 queue->tx_evtchn = queue->rx_evtchn = 0; 1787 queue->tx_irq = queue->rx_irq = 0; 1788 1789 if (netif_running(info->netdev)) 1790 napi_synchronize(&queue->napi); 1791 1792 xennet_release_tx_bufs(queue); 1793 xennet_release_rx_bufs(queue); 1794 gnttab_free_grant_references(queue->gref_tx_head); 1795 gnttab_free_grant_references(queue->gref_rx_head); 1796 1797 /* End access and free the pages */ 1798 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 1799 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 1800 1801 queue->tx_ring_ref = GRANT_INVALID_REF; 1802 queue->rx_ring_ref = GRANT_INVALID_REF; 1803 queue->tx.sring = NULL; 1804 queue->rx.sring = NULL; 1805 1806 page_pool_destroy(queue->page_pool); 1807 } 1808 } 1809 1810 /* 1811 * We are reconnecting to the backend, due to a suspend/resume, or a backend 1812 * driver restart. We tear down our netif structure and recreate it, but 1813 * leave the device-layer structures intact so that this is transparent to the 1814 * rest of the kernel. 1815 */ 1816 static int netfront_resume(struct xenbus_device *dev) 1817 { 1818 struct netfront_info *info = dev_get_drvdata(&dev->dev); 1819 1820 dev_dbg(&dev->dev, "%s\n", dev->nodename); 1821 1822 netif_tx_lock_bh(info->netdev); 1823 netif_device_detach(info->netdev); 1824 netif_tx_unlock_bh(info->netdev); 1825 1826 xennet_disconnect_backend(info); 1827 return 0; 1828 } 1829 1830 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) 1831 { 1832 char *s, *e, *macstr; 1833 int i; 1834 1835 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); 1836 if (IS_ERR(macstr)) 1837 return PTR_ERR(macstr); 1838 1839 for (i = 0; i < ETH_ALEN; i++) { 1840 mac[i] = simple_strtoul(s, &e, 16); 1841 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 1842 kfree(macstr); 1843 return -ENOENT; 1844 } 1845 s = e+1; 1846 } 1847 1848 kfree(macstr); 1849 return 0; 1850 } 1851 1852 static int setup_netfront_single(struct netfront_queue *queue) 1853 { 1854 int err; 1855 1856 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1857 if (err < 0) 1858 goto fail; 1859 1860 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, 1861 xennet_interrupt, 0, 1862 queue->info->netdev->name, 1863 queue); 1864 if (err < 0) 1865 goto bind_fail; 1866 queue->rx_evtchn = queue->tx_evtchn; 1867 queue->rx_irq = queue->tx_irq = err; 1868 1869 return 0; 1870 1871 bind_fail: 1872 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 1873 queue->tx_evtchn = 0; 1874 fail: 1875 return err; 1876 } 1877 1878 static int setup_netfront_split(struct netfront_queue *queue) 1879 { 1880 int err; 1881 1882 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1883 if (err < 0) 1884 goto fail; 1885 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); 1886 if (err < 0) 1887 goto alloc_rx_evtchn_fail; 1888 1889 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), 1890 "%s-tx", queue->name); 1891 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, 1892 xennet_tx_interrupt, 0, 1893 queue->tx_irq_name, queue); 1894 if (err < 0) 1895 goto bind_tx_fail; 1896 queue->tx_irq = err; 1897 1898 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), 1899 "%s-rx", queue->name); 1900 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, 1901 xennet_rx_interrupt, 0, 1902 queue->rx_irq_name, queue); 1903 if (err < 0) 1904 goto bind_rx_fail; 1905 queue->rx_irq = err; 1906 1907 return 0; 1908 1909 bind_rx_fail: 1910 unbind_from_irqhandler(queue->tx_irq, queue); 1911 queue->tx_irq = 0; 1912 bind_tx_fail: 1913 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); 1914 queue->rx_evtchn = 0; 1915 alloc_rx_evtchn_fail: 1916 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 1917 queue->tx_evtchn = 0; 1918 fail: 1919 return err; 1920 } 1921 1922 static int setup_netfront(struct xenbus_device *dev, 1923 struct netfront_queue *queue, unsigned int feature_split_evtchn) 1924 { 1925 struct xen_netif_tx_sring *txs; 1926 struct xen_netif_rx_sring *rxs = NULL; 1927 grant_ref_t gref; 1928 int err; 1929 1930 queue->tx_ring_ref = GRANT_INVALID_REF; 1931 queue->rx_ring_ref = GRANT_INVALID_REF; 1932 queue->rx.sring = NULL; 1933 queue->tx.sring = NULL; 1934 1935 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1936 if (!txs) { 1937 err = -ENOMEM; 1938 xenbus_dev_fatal(dev, err, "allocating tx ring page"); 1939 goto fail; 1940 } 1941 SHARED_RING_INIT(txs); 1942 FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); 1943 1944 err = xenbus_grant_ring(dev, txs, 1, &gref); 1945 if (err < 0) 1946 goto fail; 1947 queue->tx_ring_ref = gref; 1948 1949 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1950 if (!rxs) { 1951 err = -ENOMEM; 1952 xenbus_dev_fatal(dev, err, "allocating rx ring page"); 1953 goto fail; 1954 } 1955 SHARED_RING_INIT(rxs); 1956 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); 1957 1958 err = xenbus_grant_ring(dev, rxs, 1, &gref); 1959 if (err < 0) 1960 goto fail; 1961 queue->rx_ring_ref = gref; 1962 1963 if (feature_split_evtchn) 1964 err = setup_netfront_split(queue); 1965 /* setup single event channel if 1966 * a) feature-split-event-channels == 0 1967 * b) feature-split-event-channels == 1 but failed to setup 1968 */ 1969 if (!feature_split_evtchn || err) 1970 err = setup_netfront_single(queue); 1971 1972 if (err) 1973 goto fail; 1974 1975 return 0; 1976 1977 /* If we fail to setup netfront, it is safe to just revoke access to 1978 * granted pages because backend is not accessing it at this point. 1979 */ 1980 fail: 1981 if (queue->rx_ring_ref != GRANT_INVALID_REF) { 1982 gnttab_end_foreign_access(queue->rx_ring_ref, 1983 (unsigned long)rxs); 1984 queue->rx_ring_ref = GRANT_INVALID_REF; 1985 } else { 1986 free_page((unsigned long)rxs); 1987 } 1988 if (queue->tx_ring_ref != GRANT_INVALID_REF) { 1989 gnttab_end_foreign_access(queue->tx_ring_ref, 1990 (unsigned long)txs); 1991 queue->tx_ring_ref = GRANT_INVALID_REF; 1992 } else { 1993 free_page((unsigned long)txs); 1994 } 1995 return err; 1996 } 1997 1998 /* Queue-specific initialisation 1999 * This used to be done in xennet_create_dev() but must now 2000 * be run per-queue. 2001 */ 2002 static int xennet_init_queue(struct netfront_queue *queue) 2003 { 2004 unsigned short i; 2005 int err = 0; 2006 char *devid; 2007 2008 spin_lock_init(&queue->tx_lock); 2009 spin_lock_init(&queue->rx_lock); 2010 spin_lock_init(&queue->rx_cons_lock); 2011 2012 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); 2013 2014 devid = strrchr(queue->info->xbdev->nodename, '/') + 1; 2015 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", 2016 devid, queue->id); 2017 2018 /* Initialise tx_skb_freelist as a free chain containing every entry. */ 2019 queue->tx_skb_freelist = 0; 2020 queue->tx_pend_queue = TX_LINK_NONE; 2021 for (i = 0; i < NET_TX_RING_SIZE; i++) { 2022 queue->tx_link[i] = i + 1; 2023 queue->grant_tx_ref[i] = GRANT_INVALID_REF; 2024 queue->grant_tx_page[i] = NULL; 2025 } 2026 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; 2027 2028 /* Clear out rx_skbs */ 2029 for (i = 0; i < NET_RX_RING_SIZE; i++) { 2030 queue->rx_skbs[i] = NULL; 2031 queue->grant_rx_ref[i] = GRANT_INVALID_REF; 2032 } 2033 2034 /* A grant for every tx ring slot */ 2035 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 2036 &queue->gref_tx_head) < 0) { 2037 pr_alert("can't alloc tx grant refs\n"); 2038 err = -ENOMEM; 2039 goto exit; 2040 } 2041 2042 /* A grant for every rx ring slot */ 2043 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, 2044 &queue->gref_rx_head) < 0) { 2045 pr_alert("can't alloc rx grant refs\n"); 2046 err = -ENOMEM; 2047 goto exit_free_tx; 2048 } 2049 2050 return 0; 2051 2052 exit_free_tx: 2053 gnttab_free_grant_references(queue->gref_tx_head); 2054 exit: 2055 return err; 2056 } 2057 2058 static int write_queue_xenstore_keys(struct netfront_queue *queue, 2059 struct xenbus_transaction *xbt, int write_hierarchical) 2060 { 2061 /* Write the queue-specific keys into XenStore in the traditional 2062 * way for a single queue, or in a queue subkeys for multiple 2063 * queues. 2064 */ 2065 struct xenbus_device *dev = queue->info->xbdev; 2066 int err; 2067 const char *message; 2068 char *path; 2069 size_t pathsize; 2070 2071 /* Choose the correct place to write the keys */ 2072 if (write_hierarchical) { 2073 pathsize = strlen(dev->nodename) + 10; 2074 path = kzalloc(pathsize, GFP_KERNEL); 2075 if (!path) { 2076 err = -ENOMEM; 2077 message = "out of memory while writing ring references"; 2078 goto error; 2079 } 2080 snprintf(path, pathsize, "%s/queue-%u", 2081 dev->nodename, queue->id); 2082 } else { 2083 path = (char *)dev->nodename; 2084 } 2085 2086 /* Write ring references */ 2087 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", 2088 queue->tx_ring_ref); 2089 if (err) { 2090 message = "writing tx-ring-ref"; 2091 goto error; 2092 } 2093 2094 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", 2095 queue->rx_ring_ref); 2096 if (err) { 2097 message = "writing rx-ring-ref"; 2098 goto error; 2099 } 2100 2101 /* Write event channels; taking into account both shared 2102 * and split event channel scenarios. 2103 */ 2104 if (queue->tx_evtchn == queue->rx_evtchn) { 2105 /* Shared event channel */ 2106 err = xenbus_printf(*xbt, path, 2107 "event-channel", "%u", queue->tx_evtchn); 2108 if (err) { 2109 message = "writing event-channel"; 2110 goto error; 2111 } 2112 } else { 2113 /* Split event channels */ 2114 err = xenbus_printf(*xbt, path, 2115 "event-channel-tx", "%u", queue->tx_evtchn); 2116 if (err) { 2117 message = "writing event-channel-tx"; 2118 goto error; 2119 } 2120 2121 err = xenbus_printf(*xbt, path, 2122 "event-channel-rx", "%u", queue->rx_evtchn); 2123 if (err) { 2124 message = "writing event-channel-rx"; 2125 goto error; 2126 } 2127 } 2128 2129 if (write_hierarchical) 2130 kfree(path); 2131 return 0; 2132 2133 error: 2134 if (write_hierarchical) 2135 kfree(path); 2136 xenbus_dev_fatal(dev, err, "%s", message); 2137 return err; 2138 } 2139 2140 2141 2142 static int xennet_create_page_pool(struct netfront_queue *queue) 2143 { 2144 int err; 2145 struct page_pool_params pp_params = { 2146 .order = 0, 2147 .flags = 0, 2148 .pool_size = NET_RX_RING_SIZE, 2149 .nid = NUMA_NO_NODE, 2150 .dev = &queue->info->netdev->dev, 2151 .offset = XDP_PACKET_HEADROOM, 2152 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, 2153 }; 2154 2155 queue->page_pool = page_pool_create(&pp_params); 2156 if (IS_ERR(queue->page_pool)) { 2157 err = PTR_ERR(queue->page_pool); 2158 queue->page_pool = NULL; 2159 return err; 2160 } 2161 2162 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev, 2163 queue->id, 0); 2164 if (err) { 2165 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n"); 2166 goto err_free_pp; 2167 } 2168 2169 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, 2170 MEM_TYPE_PAGE_POOL, queue->page_pool); 2171 if (err) { 2172 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n"); 2173 goto err_unregister_rxq; 2174 } 2175 return 0; 2176 2177 err_unregister_rxq: 2178 xdp_rxq_info_unreg(&queue->xdp_rxq); 2179 err_free_pp: 2180 page_pool_destroy(queue->page_pool); 2181 queue->page_pool = NULL; 2182 return err; 2183 } 2184 2185 static int xennet_create_queues(struct netfront_info *info, 2186 unsigned int *num_queues) 2187 { 2188 unsigned int i; 2189 int ret; 2190 2191 info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue), 2192 GFP_KERNEL); 2193 if (!info->queues) 2194 return -ENOMEM; 2195 2196 for (i = 0; i < *num_queues; i++) { 2197 struct netfront_queue *queue = &info->queues[i]; 2198 2199 queue->id = i; 2200 queue->info = info; 2201 2202 ret = xennet_init_queue(queue); 2203 if (ret < 0) { 2204 dev_warn(&info->xbdev->dev, 2205 "only created %d queues\n", i); 2206 *num_queues = i; 2207 break; 2208 } 2209 2210 /* use page pool recycling instead of buddy allocator */ 2211 ret = xennet_create_page_pool(queue); 2212 if (ret < 0) { 2213 dev_err(&info->xbdev->dev, "can't allocate page pool\n"); 2214 *num_queues = i; 2215 return ret; 2216 } 2217 2218 netif_napi_add(queue->info->netdev, &queue->napi, 2219 xennet_poll, 64); 2220 if (netif_running(info->netdev)) 2221 napi_enable(&queue->napi); 2222 } 2223 2224 netif_set_real_num_tx_queues(info->netdev, *num_queues); 2225 2226 if (*num_queues == 0) { 2227 dev_err(&info->xbdev->dev, "no queues\n"); 2228 return -EINVAL; 2229 } 2230 return 0; 2231 } 2232 2233 /* Common code used when first setting up, and when resuming. */ 2234 static int talk_to_netback(struct xenbus_device *dev, 2235 struct netfront_info *info) 2236 { 2237 const char *message; 2238 struct xenbus_transaction xbt; 2239 int err; 2240 unsigned int feature_split_evtchn; 2241 unsigned int i = 0; 2242 unsigned int max_queues = 0; 2243 struct netfront_queue *queue = NULL; 2244 unsigned int num_queues = 1; 2245 u8 addr[ETH_ALEN]; 2246 2247 info->netdev->irq = 0; 2248 2249 /* Check if backend supports multiple queues */ 2250 max_queues = xenbus_read_unsigned(info->xbdev->otherend, 2251 "multi-queue-max-queues", 1); 2252 num_queues = min(max_queues, xennet_max_queues); 2253 2254 /* Check feature-split-event-channels */ 2255 feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, 2256 "feature-split-event-channels", 0); 2257 2258 /* Read mac addr. */ 2259 err = xen_net_read_mac(dev, addr); 2260 if (err) { 2261 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 2262 goto out_unlocked; 2263 } 2264 eth_hw_addr_set(info->netdev, addr); 2265 2266 info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, 2267 "feature-xdp-headroom", 0); 2268 if (info->netback_has_xdp_headroom) { 2269 /* set the current xen-netfront xdp state */ 2270 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ? 2271 NETBACK_XDP_HEADROOM_ENABLE : 2272 NETBACK_XDP_HEADROOM_DISABLE); 2273 if (err) 2274 goto out_unlocked; 2275 } 2276 2277 rtnl_lock(); 2278 if (info->queues) 2279 xennet_destroy_queues(info); 2280 2281 /* For the case of a reconnect reset the "broken" indicator. */ 2282 info->broken = false; 2283 2284 err = xennet_create_queues(info, &num_queues); 2285 if (err < 0) { 2286 xenbus_dev_fatal(dev, err, "creating queues"); 2287 kfree(info->queues); 2288 info->queues = NULL; 2289 goto out; 2290 } 2291 rtnl_unlock(); 2292 2293 /* Create shared ring, alloc event channel -- for each queue */ 2294 for (i = 0; i < num_queues; ++i) { 2295 queue = &info->queues[i]; 2296 err = setup_netfront(dev, queue, feature_split_evtchn); 2297 if (err) 2298 goto destroy_ring; 2299 } 2300 2301 again: 2302 err = xenbus_transaction_start(&xbt); 2303 if (err) { 2304 xenbus_dev_fatal(dev, err, "starting transaction"); 2305 goto destroy_ring; 2306 } 2307 2308 if (xenbus_exists(XBT_NIL, 2309 info->xbdev->otherend, "multi-queue-max-queues")) { 2310 /* Write the number of queues */ 2311 err = xenbus_printf(xbt, dev->nodename, 2312 "multi-queue-num-queues", "%u", num_queues); 2313 if (err) { 2314 message = "writing multi-queue-num-queues"; 2315 goto abort_transaction_no_dev_fatal; 2316 } 2317 } 2318 2319 if (num_queues == 1) { 2320 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ 2321 if (err) 2322 goto abort_transaction_no_dev_fatal; 2323 } else { 2324 /* Write the keys for each queue */ 2325 for (i = 0; i < num_queues; ++i) { 2326 queue = &info->queues[i]; 2327 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ 2328 if (err) 2329 goto abort_transaction_no_dev_fatal; 2330 } 2331 } 2332 2333 /* The remaining keys are not queue-specific */ 2334 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 2335 1); 2336 if (err) { 2337 message = "writing request-rx-copy"; 2338 goto abort_transaction; 2339 } 2340 2341 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); 2342 if (err) { 2343 message = "writing feature-rx-notify"; 2344 goto abort_transaction; 2345 } 2346 2347 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); 2348 if (err) { 2349 message = "writing feature-sg"; 2350 goto abort_transaction; 2351 } 2352 2353 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); 2354 if (err) { 2355 message = "writing feature-gso-tcpv4"; 2356 goto abort_transaction; 2357 } 2358 2359 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); 2360 if (err) { 2361 message = "writing feature-gso-tcpv6"; 2362 goto abort_transaction; 2363 } 2364 2365 err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", 2366 "1"); 2367 if (err) { 2368 message = "writing feature-ipv6-csum-offload"; 2369 goto abort_transaction; 2370 } 2371 2372 err = xenbus_transaction_end(xbt, 0); 2373 if (err) { 2374 if (err == -EAGAIN) 2375 goto again; 2376 xenbus_dev_fatal(dev, err, "completing transaction"); 2377 goto destroy_ring; 2378 } 2379 2380 return 0; 2381 2382 abort_transaction: 2383 xenbus_dev_fatal(dev, err, "%s", message); 2384 abort_transaction_no_dev_fatal: 2385 xenbus_transaction_end(xbt, 1); 2386 destroy_ring: 2387 xennet_disconnect_backend(info); 2388 rtnl_lock(); 2389 xennet_destroy_queues(info); 2390 out: 2391 rtnl_unlock(); 2392 out_unlocked: 2393 device_unregister(&dev->dev); 2394 return err; 2395 } 2396 2397 static int xennet_connect(struct net_device *dev) 2398 { 2399 struct netfront_info *np = netdev_priv(dev); 2400 unsigned int num_queues = 0; 2401 int err; 2402 unsigned int j = 0; 2403 struct netfront_queue *queue = NULL; 2404 2405 if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { 2406 dev_info(&dev->dev, 2407 "backend does not support copying receive path\n"); 2408 return -ENODEV; 2409 } 2410 2411 err = talk_to_netback(np->xbdev, np); 2412 if (err) 2413 return err; 2414 if (np->netback_has_xdp_headroom) 2415 pr_info("backend supports XDP headroom\n"); 2416 2417 /* talk_to_netback() sets the correct number of queues */ 2418 num_queues = dev->real_num_tx_queues; 2419 2420 if (dev->reg_state == NETREG_UNINITIALIZED) { 2421 err = register_netdev(dev); 2422 if (err) { 2423 pr_warn("%s: register_netdev err=%d\n", __func__, err); 2424 device_unregister(&np->xbdev->dev); 2425 return err; 2426 } 2427 } 2428 2429 rtnl_lock(); 2430 netdev_update_features(dev); 2431 rtnl_unlock(); 2432 2433 /* 2434 * All public and private state should now be sane. Get 2435 * ready to start sending and receiving packets and give the driver 2436 * domain a kick because we've probably just requeued some 2437 * packets. 2438 */ 2439 netif_tx_lock_bh(np->netdev); 2440 netif_device_attach(np->netdev); 2441 netif_tx_unlock_bh(np->netdev); 2442 2443 netif_carrier_on(np->netdev); 2444 for (j = 0; j < num_queues; ++j) { 2445 queue = &np->queues[j]; 2446 2447 notify_remote_via_irq(queue->tx_irq); 2448 if (queue->tx_irq != queue->rx_irq) 2449 notify_remote_via_irq(queue->rx_irq); 2450 2451 spin_lock_irq(&queue->tx_lock); 2452 xennet_tx_buf_gc(queue); 2453 spin_unlock_irq(&queue->tx_lock); 2454 2455 spin_lock_bh(&queue->rx_lock); 2456 xennet_alloc_rx_buffers(queue); 2457 spin_unlock_bh(&queue->rx_lock); 2458 } 2459 2460 return 0; 2461 } 2462 2463 /* 2464 * Callback received when the backend's state changes. 2465 */ 2466 static void netback_changed(struct xenbus_device *dev, 2467 enum xenbus_state backend_state) 2468 { 2469 struct netfront_info *np = dev_get_drvdata(&dev->dev); 2470 struct net_device *netdev = np->netdev; 2471 2472 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 2473 2474 wake_up_all(&module_wq); 2475 2476 switch (backend_state) { 2477 case XenbusStateInitialising: 2478 case XenbusStateInitialised: 2479 case XenbusStateReconfiguring: 2480 case XenbusStateReconfigured: 2481 case XenbusStateUnknown: 2482 break; 2483 2484 case XenbusStateInitWait: 2485 if (dev->state != XenbusStateInitialising) 2486 break; 2487 if (xennet_connect(netdev) != 0) 2488 break; 2489 xenbus_switch_state(dev, XenbusStateConnected); 2490 break; 2491 2492 case XenbusStateConnected: 2493 netdev_notify_peers(netdev); 2494 break; 2495 2496 case XenbusStateClosed: 2497 if (dev->state == XenbusStateClosed) 2498 break; 2499 fallthrough; /* Missed the backend's CLOSING state */ 2500 case XenbusStateClosing: 2501 xenbus_frontend_closed(dev); 2502 break; 2503 } 2504 } 2505 2506 static const struct xennet_stat { 2507 char name[ETH_GSTRING_LEN]; 2508 u16 offset; 2509 } xennet_stats[] = { 2510 { 2511 "rx_gso_checksum_fixup", 2512 offsetof(struct netfront_info, rx_gso_checksum_fixup) 2513 }, 2514 }; 2515 2516 static int xennet_get_sset_count(struct net_device *dev, int string_set) 2517 { 2518 switch (string_set) { 2519 case ETH_SS_STATS: 2520 return ARRAY_SIZE(xennet_stats); 2521 default: 2522 return -EINVAL; 2523 } 2524 } 2525 2526 static void xennet_get_ethtool_stats(struct net_device *dev, 2527 struct ethtool_stats *stats, u64 * data) 2528 { 2529 void *np = netdev_priv(dev); 2530 int i; 2531 2532 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2533 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); 2534 } 2535 2536 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2537 { 2538 int i; 2539 2540 switch (stringset) { 2541 case ETH_SS_STATS: 2542 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2543 memcpy(data + i * ETH_GSTRING_LEN, 2544 xennet_stats[i].name, ETH_GSTRING_LEN); 2545 break; 2546 } 2547 } 2548 2549 static const struct ethtool_ops xennet_ethtool_ops = 2550 { 2551 .get_link = ethtool_op_get_link, 2552 2553 .get_sset_count = xennet_get_sset_count, 2554 .get_ethtool_stats = xennet_get_ethtool_stats, 2555 .get_strings = xennet_get_strings, 2556 .get_ts_info = ethtool_op_get_ts_info, 2557 }; 2558 2559 #ifdef CONFIG_SYSFS 2560 static ssize_t show_rxbuf(struct device *dev, 2561 struct device_attribute *attr, char *buf) 2562 { 2563 return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); 2564 } 2565 2566 static ssize_t store_rxbuf(struct device *dev, 2567 struct device_attribute *attr, 2568 const char *buf, size_t len) 2569 { 2570 char *endp; 2571 2572 if (!capable(CAP_NET_ADMIN)) 2573 return -EPERM; 2574 2575 simple_strtoul(buf, &endp, 0); 2576 if (endp == buf) 2577 return -EBADMSG; 2578 2579 /* rxbuf_min and rxbuf_max are no longer configurable. */ 2580 2581 return len; 2582 } 2583 2584 static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf); 2585 static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf); 2586 static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL); 2587 2588 static struct attribute *xennet_dev_attrs[] = { 2589 &dev_attr_rxbuf_min.attr, 2590 &dev_attr_rxbuf_max.attr, 2591 &dev_attr_rxbuf_cur.attr, 2592 NULL 2593 }; 2594 2595 static const struct attribute_group xennet_dev_group = { 2596 .attrs = xennet_dev_attrs 2597 }; 2598 #endif /* CONFIG_SYSFS */ 2599 2600 static void xennet_bus_close(struct xenbus_device *dev) 2601 { 2602 int ret; 2603 2604 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2605 return; 2606 do { 2607 xenbus_switch_state(dev, XenbusStateClosing); 2608 ret = wait_event_timeout(module_wq, 2609 xenbus_read_driver_state(dev->otherend) == 2610 XenbusStateClosing || 2611 xenbus_read_driver_state(dev->otherend) == 2612 XenbusStateClosed || 2613 xenbus_read_driver_state(dev->otherend) == 2614 XenbusStateUnknown, 2615 XENNET_TIMEOUT); 2616 } while (!ret); 2617 2618 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2619 return; 2620 2621 do { 2622 xenbus_switch_state(dev, XenbusStateClosed); 2623 ret = wait_event_timeout(module_wq, 2624 xenbus_read_driver_state(dev->otherend) == 2625 XenbusStateClosed || 2626 xenbus_read_driver_state(dev->otherend) == 2627 XenbusStateUnknown, 2628 XENNET_TIMEOUT); 2629 } while (!ret); 2630 } 2631 2632 static int xennet_remove(struct xenbus_device *dev) 2633 { 2634 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2635 2636 xennet_bus_close(dev); 2637 xennet_disconnect_backend(info); 2638 2639 if (info->netdev->reg_state == NETREG_REGISTERED) 2640 unregister_netdev(info->netdev); 2641 2642 if (info->queues) { 2643 rtnl_lock(); 2644 xennet_destroy_queues(info); 2645 rtnl_unlock(); 2646 } 2647 xennet_free_netdev(info->netdev); 2648 2649 return 0; 2650 } 2651 2652 static const struct xenbus_device_id netfront_ids[] = { 2653 { "vif" }, 2654 { "" } 2655 }; 2656 2657 static struct xenbus_driver netfront_driver = { 2658 .ids = netfront_ids, 2659 .probe = netfront_probe, 2660 .remove = xennet_remove, 2661 .resume = netfront_resume, 2662 .otherend_changed = netback_changed, 2663 }; 2664 2665 static int __init netif_init(void) 2666 { 2667 if (!xen_domain()) 2668 return -ENODEV; 2669 2670 if (!xen_has_pv_nic_devices()) 2671 return -ENODEV; 2672 2673 pr_info("Initialising Xen virtual ethernet driver\n"); 2674 2675 /* Allow as many queues as there are CPUs inut max. 8 if user has not 2676 * specified a value. 2677 */ 2678 if (xennet_max_queues == 0) 2679 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, 2680 num_online_cpus()); 2681 2682 return xenbus_register_frontend(&netfront_driver); 2683 } 2684 module_init(netif_init); 2685 2686 2687 static void __exit netif_exit(void) 2688 { 2689 xenbus_unregister_driver(&netfront_driver); 2690 } 2691 module_exit(netif_exit); 2692 2693 MODULE_DESCRIPTION("Xen virtual network device frontend"); 2694 MODULE_LICENSE("GPL"); 2695 MODULE_ALIAS("xen:vif"); 2696 MODULE_ALIAS("xennet"); 2697