1 /* 2 * Virtual network driver for conversing with remote driver backends. 3 * 4 * Copyright (c) 2002-2005, K A Fraser 5 * Copyright (c) 2005, XenSource Ltd 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License version 2 9 * as published by the Free Software Foundation; or, when distributed 10 * separately from the Linux kernel or incorporated into other 11 * software packages, subject to the following license: 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this source file (the "Software"), to deal in the Software without 15 * restriction, including without limitation the rights to use, copy, modify, 16 * merge, publish, distribute, sublicense, and/or sell copies of the Software, 17 * and to permit persons to whom the Software is furnished to do so, subject to 18 * the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 29 * IN THE SOFTWARE. 30 */ 31 32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33 34 #include <linux/module.h> 35 #include <linux/kernel.h> 36 #include <linux/netdevice.h> 37 #include <linux/etherdevice.h> 38 #include <linux/skbuff.h> 39 #include <linux/ethtool.h> 40 #include <linux/if_ether.h> 41 #include <net/tcp.h> 42 #include <linux/udp.h> 43 #include <linux/moduleparam.h> 44 #include <linux/mm.h> 45 #include <linux/slab.h> 46 #include <net/ip.h> 47 48 #include <xen/xen.h> 49 #include <xen/xenbus.h> 50 #include <xen/events.h> 51 #include <xen/page.h> 52 #include <xen/platform_pci.h> 53 #include <xen/grant_table.h> 54 55 #include <xen/interface/io/netif.h> 56 #include <xen/interface/memory.h> 57 #include <xen/interface/grant_table.h> 58 59 /* Module parameters */ 60 #define MAX_QUEUES_DEFAULT 8 61 static unsigned int xennet_max_queues; 62 module_param_named(max_queues, xennet_max_queues, uint, 0644); 63 MODULE_PARM_DESC(max_queues, 64 "Maximum number of queues per virtual interface"); 65 66 static const struct ethtool_ops xennet_ethtool_ops; 67 68 struct netfront_cb { 69 int pull_to; 70 }; 71 72 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 73 74 #define RX_COPY_THRESHOLD 256 75 76 #define GRANT_INVALID_REF 0 77 78 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE) 79 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE) 80 81 /* Minimum number of Rx slots (includes slot for GSO metadata). */ 82 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) 83 84 /* Queue name is interface name with "-qNNN" appended */ 85 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) 86 87 /* IRQ name is queue name with "-tx" or "-rx" appended */ 88 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 89 90 static DECLARE_WAIT_QUEUE_HEAD(module_wq); 91 92 struct netfront_stats { 93 u64 packets; 94 u64 bytes; 95 struct u64_stats_sync syncp; 96 }; 97 98 struct netfront_info; 99 100 struct netfront_queue { 101 unsigned int id; /* Queue ID, 0-based */ 102 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ 103 struct netfront_info *info; 104 105 struct napi_struct napi; 106 107 /* Split event channels support, tx_* == rx_* when using 108 * single event channel. 109 */ 110 unsigned int tx_evtchn, rx_evtchn; 111 unsigned int tx_irq, rx_irq; 112 /* Only used when split event channels support is enabled */ 113 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ 114 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ 115 116 spinlock_t tx_lock; 117 struct xen_netif_tx_front_ring tx; 118 int tx_ring_ref; 119 120 /* 121 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries 122 * are linked from tx_skb_freelist through skb_entry.link. 123 * 124 * NB. Freelist index entries are always going to be less than 125 * PAGE_OFFSET, whereas pointers to skbs will always be equal or 126 * greater than PAGE_OFFSET: we use this property to distinguish 127 * them. 128 */ 129 union skb_entry { 130 struct sk_buff *skb; 131 unsigned long link; 132 } tx_skbs[NET_TX_RING_SIZE]; 133 grant_ref_t gref_tx_head; 134 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; 135 struct page *grant_tx_page[NET_TX_RING_SIZE]; 136 unsigned tx_skb_freelist; 137 138 spinlock_t rx_lock ____cacheline_aligned_in_smp; 139 struct xen_netif_rx_front_ring rx; 140 int rx_ring_ref; 141 142 struct timer_list rx_refill_timer; 143 144 struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; 145 grant_ref_t gref_rx_head; 146 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; 147 }; 148 149 struct netfront_info { 150 struct list_head list; 151 struct net_device *netdev; 152 153 struct xenbus_device *xbdev; 154 155 /* Multi-queue support */ 156 struct netfront_queue *queues; 157 158 /* Statistics */ 159 struct netfront_stats __percpu *rx_stats; 160 struct netfront_stats __percpu *tx_stats; 161 162 atomic_t rx_gso_checksum_fixup; 163 }; 164 165 struct netfront_rx_info { 166 struct xen_netif_rx_response rx; 167 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; 168 }; 169 170 static void skb_entry_set_link(union skb_entry *list, unsigned short id) 171 { 172 list->link = id; 173 } 174 175 static int skb_entry_is_link(const union skb_entry *list) 176 { 177 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); 178 return (unsigned long)list->skb < PAGE_OFFSET; 179 } 180 181 /* 182 * Access macros for acquiring freeing slots in tx_skbs[]. 183 */ 184 185 static void add_id_to_freelist(unsigned *head, union skb_entry *list, 186 unsigned short id) 187 { 188 skb_entry_set_link(&list[id], *head); 189 *head = id; 190 } 191 192 static unsigned short get_id_from_freelist(unsigned *head, 193 union skb_entry *list) 194 { 195 unsigned int id = *head; 196 *head = list[id].link; 197 return id; 198 } 199 200 static int xennet_rxidx(RING_IDX idx) 201 { 202 return idx & (NET_RX_RING_SIZE - 1); 203 } 204 205 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, 206 RING_IDX ri) 207 { 208 int i = xennet_rxidx(ri); 209 struct sk_buff *skb = queue->rx_skbs[i]; 210 queue->rx_skbs[i] = NULL; 211 return skb; 212 } 213 214 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, 215 RING_IDX ri) 216 { 217 int i = xennet_rxidx(ri); 218 grant_ref_t ref = queue->grant_rx_ref[i]; 219 queue->grant_rx_ref[i] = GRANT_INVALID_REF; 220 return ref; 221 } 222 223 #ifdef CONFIG_SYSFS 224 static const struct attribute_group xennet_dev_group; 225 #endif 226 227 static bool xennet_can_sg(struct net_device *dev) 228 { 229 return dev->features & NETIF_F_SG; 230 } 231 232 233 static void rx_refill_timeout(struct timer_list *t) 234 { 235 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer); 236 napi_schedule(&queue->napi); 237 } 238 239 static int netfront_tx_slot_available(struct netfront_queue *queue) 240 { 241 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < 242 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); 243 } 244 245 static void xennet_maybe_wake_tx(struct netfront_queue *queue) 246 { 247 struct net_device *dev = queue->info->netdev; 248 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); 249 250 if (unlikely(netif_tx_queue_stopped(dev_queue)) && 251 netfront_tx_slot_available(queue) && 252 likely(netif_running(dev))) 253 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); 254 } 255 256 257 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) 258 { 259 struct sk_buff *skb; 260 struct page *page; 261 262 skb = __netdev_alloc_skb(queue->info->netdev, 263 RX_COPY_THRESHOLD + NET_IP_ALIGN, 264 GFP_ATOMIC | __GFP_NOWARN); 265 if (unlikely(!skb)) 266 return NULL; 267 268 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); 269 if (!page) { 270 kfree_skb(skb); 271 return NULL; 272 } 273 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); 274 275 /* Align ip header to a 16 bytes boundary */ 276 skb_reserve(skb, NET_IP_ALIGN); 277 skb->dev = queue->info->netdev; 278 279 return skb; 280 } 281 282 283 static void xennet_alloc_rx_buffers(struct netfront_queue *queue) 284 { 285 RING_IDX req_prod = queue->rx.req_prod_pvt; 286 int notify; 287 int err = 0; 288 289 if (unlikely(!netif_carrier_ok(queue->info->netdev))) 290 return; 291 292 for (req_prod = queue->rx.req_prod_pvt; 293 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; 294 req_prod++) { 295 struct sk_buff *skb; 296 unsigned short id; 297 grant_ref_t ref; 298 struct page *page; 299 struct xen_netif_rx_request *req; 300 301 skb = xennet_alloc_one_rx_buffer(queue); 302 if (!skb) { 303 err = -ENOMEM; 304 break; 305 } 306 307 id = xennet_rxidx(req_prod); 308 309 BUG_ON(queue->rx_skbs[id]); 310 queue->rx_skbs[id] = skb; 311 312 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); 313 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 314 queue->grant_rx_ref[id] = ref; 315 316 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 317 318 req = RING_GET_REQUEST(&queue->rx, req_prod); 319 gnttab_page_grant_foreign_access_ref_one(ref, 320 queue->info->xbdev->otherend_id, 321 page, 322 0); 323 req->id = id; 324 req->gref = ref; 325 } 326 327 queue->rx.req_prod_pvt = req_prod; 328 329 /* Try again later if there are not enough requests or skb allocation 330 * failed. 331 * Enough requests is quantified as the sum of newly created slots and 332 * the unconsumed slots at the backend. 333 */ 334 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || 335 unlikely(err)) { 336 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); 337 return; 338 } 339 340 wmb(); /* barrier so backend seens requests */ 341 342 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); 343 if (notify) 344 notify_remote_via_irq(queue->rx_irq); 345 } 346 347 static int xennet_open(struct net_device *dev) 348 { 349 struct netfront_info *np = netdev_priv(dev); 350 unsigned int num_queues = dev->real_num_tx_queues; 351 unsigned int i = 0; 352 struct netfront_queue *queue = NULL; 353 354 if (!np->queues) 355 return -ENODEV; 356 357 for (i = 0; i < num_queues; ++i) { 358 queue = &np->queues[i]; 359 napi_enable(&queue->napi); 360 361 spin_lock_bh(&queue->rx_lock); 362 if (netif_carrier_ok(dev)) { 363 xennet_alloc_rx_buffers(queue); 364 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; 365 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) 366 napi_schedule(&queue->napi); 367 } 368 spin_unlock_bh(&queue->rx_lock); 369 } 370 371 netif_tx_start_all_queues(dev); 372 373 return 0; 374 } 375 376 static void xennet_tx_buf_gc(struct netfront_queue *queue) 377 { 378 RING_IDX cons, prod; 379 unsigned short id; 380 struct sk_buff *skb; 381 bool more_to_do; 382 383 BUG_ON(!netif_carrier_ok(queue->info->netdev)); 384 385 do { 386 prod = queue->tx.sring->rsp_prod; 387 rmb(); /* Ensure we see responses up to 'rp'. */ 388 389 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { 390 struct xen_netif_tx_response *txrsp; 391 392 txrsp = RING_GET_RESPONSE(&queue->tx, cons); 393 if (txrsp->status == XEN_NETIF_RSP_NULL) 394 continue; 395 396 id = txrsp->id; 397 skb = queue->tx_skbs[id].skb; 398 if (unlikely(gnttab_query_foreign_access( 399 queue->grant_tx_ref[id]) != 0)) { 400 pr_alert("%s: warning -- grant still in use by backend domain\n", 401 __func__); 402 BUG(); 403 } 404 gnttab_end_foreign_access_ref( 405 queue->grant_tx_ref[id], GNTMAP_readonly); 406 gnttab_release_grant_reference( 407 &queue->gref_tx_head, queue->grant_tx_ref[id]); 408 queue->grant_tx_ref[id] = GRANT_INVALID_REF; 409 queue->grant_tx_page[id] = NULL; 410 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); 411 dev_kfree_skb_irq(skb); 412 } 413 414 queue->tx.rsp_cons = prod; 415 416 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do); 417 } while (more_to_do); 418 419 xennet_maybe_wake_tx(queue); 420 } 421 422 struct xennet_gnttab_make_txreq { 423 struct netfront_queue *queue; 424 struct sk_buff *skb; 425 struct page *page; 426 struct xen_netif_tx_request *tx; /* Last request */ 427 unsigned int size; 428 }; 429 430 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, 431 unsigned int len, void *data) 432 { 433 struct xennet_gnttab_make_txreq *info = data; 434 unsigned int id; 435 struct xen_netif_tx_request *tx; 436 grant_ref_t ref; 437 /* convenient aliases */ 438 struct page *page = info->page; 439 struct netfront_queue *queue = info->queue; 440 struct sk_buff *skb = info->skb; 441 442 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 443 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 444 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 445 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 446 447 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 448 gfn, GNTMAP_readonly); 449 450 queue->tx_skbs[id].skb = skb; 451 queue->grant_tx_page[id] = page; 452 queue->grant_tx_ref[id] = ref; 453 454 tx->id = id; 455 tx->gref = ref; 456 tx->offset = offset; 457 tx->size = len; 458 tx->flags = 0; 459 460 info->tx = tx; 461 info->size += tx->size; 462 } 463 464 static struct xen_netif_tx_request *xennet_make_first_txreq( 465 struct netfront_queue *queue, struct sk_buff *skb, 466 struct page *page, unsigned int offset, unsigned int len) 467 { 468 struct xennet_gnttab_make_txreq info = { 469 .queue = queue, 470 .skb = skb, 471 .page = page, 472 .size = 0, 473 }; 474 475 gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info); 476 477 return info.tx; 478 } 479 480 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, 481 unsigned int len, void *data) 482 { 483 struct xennet_gnttab_make_txreq *info = data; 484 485 info->tx->flags |= XEN_NETTXF_more_data; 486 skb_get(info->skb); 487 xennet_tx_setup_grant(gfn, offset, len, data); 488 } 489 490 static struct xen_netif_tx_request *xennet_make_txreqs( 491 struct netfront_queue *queue, struct xen_netif_tx_request *tx, 492 struct sk_buff *skb, struct page *page, 493 unsigned int offset, unsigned int len) 494 { 495 struct xennet_gnttab_make_txreq info = { 496 .queue = queue, 497 .skb = skb, 498 .tx = tx, 499 }; 500 501 /* Skip unused frames from start of page */ 502 page += offset >> PAGE_SHIFT; 503 offset &= ~PAGE_MASK; 504 505 while (len) { 506 info.page = page; 507 info.size = 0; 508 509 gnttab_foreach_grant_in_range(page, offset, len, 510 xennet_make_one_txreq, 511 &info); 512 513 page++; 514 offset = 0; 515 len -= info.size; 516 } 517 518 return info.tx; 519 } 520 521 /* 522 * Count how many ring slots are required to send this skb. Each frag 523 * might be a compound page. 524 */ 525 static int xennet_count_skb_slots(struct sk_buff *skb) 526 { 527 int i, frags = skb_shinfo(skb)->nr_frags; 528 int slots; 529 530 slots = gnttab_count_grant(offset_in_page(skb->data), 531 skb_headlen(skb)); 532 533 for (i = 0; i < frags; i++) { 534 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 535 unsigned long size = skb_frag_size(frag); 536 unsigned long offset = frag->page_offset; 537 538 /* Skip unused frames from start of page */ 539 offset &= ~PAGE_MASK; 540 541 slots += gnttab_count_grant(offset, size); 542 } 543 544 return slots; 545 } 546 547 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, 548 struct net_device *sb_dev, 549 select_queue_fallback_t fallback) 550 { 551 unsigned int num_queues = dev->real_num_tx_queues; 552 u32 hash; 553 u16 queue_idx; 554 555 /* First, check if there is only one queue */ 556 if (num_queues == 1) { 557 queue_idx = 0; 558 } else { 559 hash = skb_get_hash(skb); 560 queue_idx = hash % num_queues; 561 } 562 563 return queue_idx; 564 } 565 566 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) 567 568 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) 569 { 570 struct netfront_info *np = netdev_priv(dev); 571 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); 572 struct xen_netif_tx_request *tx, *first_tx; 573 unsigned int i; 574 int notify; 575 int slots; 576 struct page *page; 577 unsigned int offset; 578 unsigned int len; 579 unsigned long flags; 580 struct netfront_queue *queue = NULL; 581 unsigned int num_queues = dev->real_num_tx_queues; 582 u16 queue_index; 583 struct sk_buff *nskb; 584 585 /* Drop the packet if no queues are set up */ 586 if (num_queues < 1) 587 goto drop; 588 /* Determine which queue to transmit this SKB on */ 589 queue_index = skb_get_queue_mapping(skb); 590 queue = &np->queues[queue_index]; 591 592 /* If skb->len is too big for wire format, drop skb and alert 593 * user about misconfiguration. 594 */ 595 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { 596 net_alert_ratelimited( 597 "xennet: skb->len = %u, too big for wire format\n", 598 skb->len); 599 goto drop; 600 } 601 602 slots = xennet_count_skb_slots(skb); 603 if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) { 604 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", 605 slots, skb->len); 606 if (skb_linearize(skb)) 607 goto drop; 608 } 609 610 page = virt_to_page(skb->data); 611 offset = offset_in_page(skb->data); 612 613 /* The first req should be at least ETH_HLEN size or the packet will be 614 * dropped by netback. 615 */ 616 if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { 617 nskb = skb_copy(skb, GFP_ATOMIC); 618 if (!nskb) 619 goto drop; 620 dev_consume_skb_any(skb); 621 skb = nskb; 622 page = virt_to_page(skb->data); 623 offset = offset_in_page(skb->data); 624 } 625 626 len = skb_headlen(skb); 627 628 spin_lock_irqsave(&queue->tx_lock, flags); 629 630 if (unlikely(!netif_carrier_ok(dev) || 631 (slots > 1 && !xennet_can_sg(dev)) || 632 netif_needs_gso(skb, netif_skb_features(skb)))) { 633 spin_unlock_irqrestore(&queue->tx_lock, flags); 634 goto drop; 635 } 636 637 /* First request for the linear area. */ 638 first_tx = tx = xennet_make_first_txreq(queue, skb, 639 page, offset, len); 640 offset += tx->size; 641 if (offset == PAGE_SIZE) { 642 page++; 643 offset = 0; 644 } 645 len -= tx->size; 646 647 if (skb->ip_summed == CHECKSUM_PARTIAL) 648 /* local packet? */ 649 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; 650 else if (skb->ip_summed == CHECKSUM_UNNECESSARY) 651 /* remote but checksummed. */ 652 tx->flags |= XEN_NETTXF_data_validated; 653 654 /* Optional extra info after the first request. */ 655 if (skb_shinfo(skb)->gso_size) { 656 struct xen_netif_extra_info *gso; 657 658 gso = (struct xen_netif_extra_info *) 659 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 660 661 tx->flags |= XEN_NETTXF_extra_info; 662 663 gso->u.gso.size = skb_shinfo(skb)->gso_size; 664 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? 665 XEN_NETIF_GSO_TYPE_TCPV6 : 666 XEN_NETIF_GSO_TYPE_TCPV4; 667 gso->u.gso.pad = 0; 668 gso->u.gso.features = 0; 669 670 gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 671 gso->flags = 0; 672 } 673 674 /* Requests for the rest of the linear area. */ 675 tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); 676 677 /* Requests for all the frags. */ 678 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 679 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 680 tx = xennet_make_txreqs(queue, tx, skb, 681 skb_frag_page(frag), frag->page_offset, 682 skb_frag_size(frag)); 683 } 684 685 /* First request has the packet length. */ 686 first_tx->size = skb->len; 687 688 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); 689 if (notify) 690 notify_remote_via_irq(queue->tx_irq); 691 692 u64_stats_update_begin(&tx_stats->syncp); 693 tx_stats->bytes += skb->len; 694 tx_stats->packets++; 695 u64_stats_update_end(&tx_stats->syncp); 696 697 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 698 xennet_tx_buf_gc(queue); 699 700 if (!netfront_tx_slot_available(queue)) 701 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); 702 703 spin_unlock_irqrestore(&queue->tx_lock, flags); 704 705 return NETDEV_TX_OK; 706 707 drop: 708 dev->stats.tx_dropped++; 709 dev_kfree_skb_any(skb); 710 return NETDEV_TX_OK; 711 } 712 713 static int xennet_close(struct net_device *dev) 714 { 715 struct netfront_info *np = netdev_priv(dev); 716 unsigned int num_queues = dev->real_num_tx_queues; 717 unsigned int i; 718 struct netfront_queue *queue; 719 netif_tx_stop_all_queues(np->netdev); 720 for (i = 0; i < num_queues; ++i) { 721 queue = &np->queues[i]; 722 napi_disable(&queue->napi); 723 } 724 return 0; 725 } 726 727 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, 728 grant_ref_t ref) 729 { 730 int new = xennet_rxidx(queue->rx.req_prod_pvt); 731 732 BUG_ON(queue->rx_skbs[new]); 733 queue->rx_skbs[new] = skb; 734 queue->grant_rx_ref[new] = ref; 735 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; 736 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; 737 queue->rx.req_prod_pvt++; 738 } 739 740 static int xennet_get_extras(struct netfront_queue *queue, 741 struct xen_netif_extra_info *extras, 742 RING_IDX rp) 743 744 { 745 struct xen_netif_extra_info *extra; 746 struct device *dev = &queue->info->netdev->dev; 747 RING_IDX cons = queue->rx.rsp_cons; 748 int err = 0; 749 750 do { 751 struct sk_buff *skb; 752 grant_ref_t ref; 753 754 if (unlikely(cons + 1 == rp)) { 755 if (net_ratelimit()) 756 dev_warn(dev, "Missing extra info\n"); 757 err = -EBADR; 758 break; 759 } 760 761 extra = (struct xen_netif_extra_info *) 762 RING_GET_RESPONSE(&queue->rx, ++cons); 763 764 if (unlikely(!extra->type || 765 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 766 if (net_ratelimit()) 767 dev_warn(dev, "Invalid extra type: %d\n", 768 extra->type); 769 err = -EINVAL; 770 } else { 771 memcpy(&extras[extra->type - 1], extra, 772 sizeof(*extra)); 773 } 774 775 skb = xennet_get_rx_skb(queue, cons); 776 ref = xennet_get_rx_ref(queue, cons); 777 xennet_move_rx_slot(queue, skb, ref); 778 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); 779 780 queue->rx.rsp_cons = cons; 781 return err; 782 } 783 784 static int xennet_get_responses(struct netfront_queue *queue, 785 struct netfront_rx_info *rinfo, RING_IDX rp, 786 struct sk_buff_head *list) 787 { 788 struct xen_netif_rx_response *rx = &rinfo->rx; 789 struct xen_netif_extra_info *extras = rinfo->extras; 790 struct device *dev = &queue->info->netdev->dev; 791 RING_IDX cons = queue->rx.rsp_cons; 792 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); 793 grant_ref_t ref = xennet_get_rx_ref(queue, cons); 794 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); 795 int slots = 1; 796 int err = 0; 797 unsigned long ret; 798 799 if (rx->flags & XEN_NETRXF_extra_info) { 800 err = xennet_get_extras(queue, extras, rp); 801 cons = queue->rx.rsp_cons; 802 } 803 804 for (;;) { 805 if (unlikely(rx->status < 0 || 806 rx->offset + rx->status > XEN_PAGE_SIZE)) { 807 if (net_ratelimit()) 808 dev_warn(dev, "rx->offset: %u, size: %d\n", 809 rx->offset, rx->status); 810 xennet_move_rx_slot(queue, skb, ref); 811 err = -EINVAL; 812 goto next; 813 } 814 815 /* 816 * This definitely indicates a bug, either in this driver or in 817 * the backend driver. In future this should flag the bad 818 * situation to the system controller to reboot the backend. 819 */ 820 if (ref == GRANT_INVALID_REF) { 821 if (net_ratelimit()) 822 dev_warn(dev, "Bad rx response id %d.\n", 823 rx->id); 824 err = -EINVAL; 825 goto next; 826 } 827 828 ret = gnttab_end_foreign_access_ref(ref, 0); 829 BUG_ON(!ret); 830 831 gnttab_release_grant_reference(&queue->gref_rx_head, ref); 832 833 __skb_queue_tail(list, skb); 834 835 next: 836 if (!(rx->flags & XEN_NETRXF_more_data)) 837 break; 838 839 if (cons + slots == rp) { 840 if (net_ratelimit()) 841 dev_warn(dev, "Need more slots\n"); 842 err = -ENOENT; 843 break; 844 } 845 846 rx = RING_GET_RESPONSE(&queue->rx, cons + slots); 847 skb = xennet_get_rx_skb(queue, cons + slots); 848 ref = xennet_get_rx_ref(queue, cons + slots); 849 slots++; 850 } 851 852 if (unlikely(slots > max)) { 853 if (net_ratelimit()) 854 dev_warn(dev, "Too many slots\n"); 855 err = -E2BIG; 856 } 857 858 if (unlikely(err)) 859 queue->rx.rsp_cons = cons + slots; 860 861 return err; 862 } 863 864 static int xennet_set_skb_gso(struct sk_buff *skb, 865 struct xen_netif_extra_info *gso) 866 { 867 if (!gso->u.gso.size) { 868 if (net_ratelimit()) 869 pr_warn("GSO size must not be zero\n"); 870 return -EINVAL; 871 } 872 873 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && 874 gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { 875 if (net_ratelimit()) 876 pr_warn("Bad GSO type %d\n", gso->u.gso.type); 877 return -EINVAL; 878 } 879 880 skb_shinfo(skb)->gso_size = gso->u.gso.size; 881 skb_shinfo(skb)->gso_type = 882 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ? 883 SKB_GSO_TCPV4 : 884 SKB_GSO_TCPV6; 885 886 /* Header must be checked, and gso_segs computed. */ 887 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 888 skb_shinfo(skb)->gso_segs = 0; 889 890 return 0; 891 } 892 893 static RING_IDX xennet_fill_frags(struct netfront_queue *queue, 894 struct sk_buff *skb, 895 struct sk_buff_head *list) 896 { 897 RING_IDX cons = queue->rx.rsp_cons; 898 struct sk_buff *nskb; 899 900 while ((nskb = __skb_dequeue(list))) { 901 struct xen_netif_rx_response *rx = 902 RING_GET_RESPONSE(&queue->rx, ++cons); 903 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 904 905 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { 906 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 907 908 BUG_ON(pull_to <= skb_headlen(skb)); 909 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 910 } 911 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { 912 queue->rx.rsp_cons = ++cons; 913 kfree_skb(nskb); 914 return ~0U; 915 } 916 917 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 918 skb_frag_page(nfrag), 919 rx->offset, rx->status, PAGE_SIZE); 920 921 skb_shinfo(nskb)->nr_frags = 0; 922 kfree_skb(nskb); 923 } 924 925 return cons; 926 } 927 928 static int checksum_setup(struct net_device *dev, struct sk_buff *skb) 929 { 930 bool recalculate_partial_csum = false; 931 932 /* 933 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy 934 * peers can fail to set NETRXF_csum_blank when sending a GSO 935 * frame. In this case force the SKB to CHECKSUM_PARTIAL and 936 * recalculate the partial checksum. 937 */ 938 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 939 struct netfront_info *np = netdev_priv(dev); 940 atomic_inc(&np->rx_gso_checksum_fixup); 941 skb->ip_summed = CHECKSUM_PARTIAL; 942 recalculate_partial_csum = true; 943 } 944 945 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ 946 if (skb->ip_summed != CHECKSUM_PARTIAL) 947 return 0; 948 949 return skb_checksum_setup(skb, recalculate_partial_csum); 950 } 951 952 static int handle_incoming_queue(struct netfront_queue *queue, 953 struct sk_buff_head *rxq) 954 { 955 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); 956 int packets_dropped = 0; 957 struct sk_buff *skb; 958 959 while ((skb = __skb_dequeue(rxq)) != NULL) { 960 int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 961 962 if (pull_to > skb_headlen(skb)) 963 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 964 965 /* Ethernet work: Delayed to here as it peeks the header. */ 966 skb->protocol = eth_type_trans(skb, queue->info->netdev); 967 skb_reset_network_header(skb); 968 969 if (checksum_setup(queue->info->netdev, skb)) { 970 kfree_skb(skb); 971 packets_dropped++; 972 queue->info->netdev->stats.rx_errors++; 973 continue; 974 } 975 976 u64_stats_update_begin(&rx_stats->syncp); 977 rx_stats->packets++; 978 rx_stats->bytes += skb->len; 979 u64_stats_update_end(&rx_stats->syncp); 980 981 /* Pass it up. */ 982 napi_gro_receive(&queue->napi, skb); 983 } 984 985 return packets_dropped; 986 } 987 988 static int xennet_poll(struct napi_struct *napi, int budget) 989 { 990 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); 991 struct net_device *dev = queue->info->netdev; 992 struct sk_buff *skb; 993 struct netfront_rx_info rinfo; 994 struct xen_netif_rx_response *rx = &rinfo.rx; 995 struct xen_netif_extra_info *extras = rinfo.extras; 996 RING_IDX i, rp; 997 int work_done; 998 struct sk_buff_head rxq; 999 struct sk_buff_head errq; 1000 struct sk_buff_head tmpq; 1001 int err; 1002 1003 spin_lock(&queue->rx_lock); 1004 1005 skb_queue_head_init(&rxq); 1006 skb_queue_head_init(&errq); 1007 skb_queue_head_init(&tmpq); 1008 1009 rp = queue->rx.sring->rsp_prod; 1010 rmb(); /* Ensure we see queued responses up to 'rp'. */ 1011 1012 i = queue->rx.rsp_cons; 1013 work_done = 0; 1014 while ((i != rp) && (work_done < budget)) { 1015 memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); 1016 memset(extras, 0, sizeof(rinfo.extras)); 1017 1018 err = xennet_get_responses(queue, &rinfo, rp, &tmpq); 1019 1020 if (unlikely(err)) { 1021 err: 1022 while ((skb = __skb_dequeue(&tmpq))) 1023 __skb_queue_tail(&errq, skb); 1024 dev->stats.rx_errors++; 1025 i = queue->rx.rsp_cons; 1026 continue; 1027 } 1028 1029 skb = __skb_dequeue(&tmpq); 1030 1031 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 1032 struct xen_netif_extra_info *gso; 1033 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1034 1035 if (unlikely(xennet_set_skb_gso(skb, gso))) { 1036 __skb_queue_head(&tmpq, skb); 1037 queue->rx.rsp_cons += skb_queue_len(&tmpq); 1038 goto err; 1039 } 1040 } 1041 1042 NETFRONT_SKB_CB(skb)->pull_to = rx->status; 1043 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) 1044 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; 1045 1046 skb_shinfo(skb)->frags[0].page_offset = rx->offset; 1047 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); 1048 skb->data_len = rx->status; 1049 skb->len += rx->status; 1050 1051 i = xennet_fill_frags(queue, skb, &tmpq); 1052 if (unlikely(i == ~0U)) 1053 goto err; 1054 1055 if (rx->flags & XEN_NETRXF_csum_blank) 1056 skb->ip_summed = CHECKSUM_PARTIAL; 1057 else if (rx->flags & XEN_NETRXF_data_validated) 1058 skb->ip_summed = CHECKSUM_UNNECESSARY; 1059 1060 __skb_queue_tail(&rxq, skb); 1061 1062 queue->rx.rsp_cons = ++i; 1063 work_done++; 1064 } 1065 1066 __skb_queue_purge(&errq); 1067 1068 work_done -= handle_incoming_queue(queue, &rxq); 1069 1070 xennet_alloc_rx_buffers(queue); 1071 1072 if (work_done < budget) { 1073 int more_to_do = 0; 1074 1075 napi_complete_done(napi, work_done); 1076 1077 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); 1078 if (more_to_do) 1079 napi_schedule(napi); 1080 } 1081 1082 spin_unlock(&queue->rx_lock); 1083 1084 return work_done; 1085 } 1086 1087 static int xennet_change_mtu(struct net_device *dev, int mtu) 1088 { 1089 int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; 1090 1091 if (mtu > max) 1092 return -EINVAL; 1093 dev->mtu = mtu; 1094 return 0; 1095 } 1096 1097 static void xennet_get_stats64(struct net_device *dev, 1098 struct rtnl_link_stats64 *tot) 1099 { 1100 struct netfront_info *np = netdev_priv(dev); 1101 int cpu; 1102 1103 for_each_possible_cpu(cpu) { 1104 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); 1105 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); 1106 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1107 unsigned int start; 1108 1109 do { 1110 start = u64_stats_fetch_begin_irq(&tx_stats->syncp); 1111 tx_packets = tx_stats->packets; 1112 tx_bytes = tx_stats->bytes; 1113 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); 1114 1115 do { 1116 start = u64_stats_fetch_begin_irq(&rx_stats->syncp); 1117 rx_packets = rx_stats->packets; 1118 rx_bytes = rx_stats->bytes; 1119 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); 1120 1121 tot->rx_packets += rx_packets; 1122 tot->tx_packets += tx_packets; 1123 tot->rx_bytes += rx_bytes; 1124 tot->tx_bytes += tx_bytes; 1125 } 1126 1127 tot->rx_errors = dev->stats.rx_errors; 1128 tot->tx_dropped = dev->stats.tx_dropped; 1129 } 1130 1131 static void xennet_release_tx_bufs(struct netfront_queue *queue) 1132 { 1133 struct sk_buff *skb; 1134 int i; 1135 1136 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1137 /* Skip over entries which are actually freelist references */ 1138 if (skb_entry_is_link(&queue->tx_skbs[i])) 1139 continue; 1140 1141 skb = queue->tx_skbs[i].skb; 1142 get_page(queue->grant_tx_page[i]); 1143 gnttab_end_foreign_access(queue->grant_tx_ref[i], 1144 GNTMAP_readonly, 1145 (unsigned long)page_address(queue->grant_tx_page[i])); 1146 queue->grant_tx_page[i] = NULL; 1147 queue->grant_tx_ref[i] = GRANT_INVALID_REF; 1148 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); 1149 dev_kfree_skb_irq(skb); 1150 } 1151 } 1152 1153 static void xennet_release_rx_bufs(struct netfront_queue *queue) 1154 { 1155 int id, ref; 1156 1157 spin_lock_bh(&queue->rx_lock); 1158 1159 for (id = 0; id < NET_RX_RING_SIZE; id++) { 1160 struct sk_buff *skb; 1161 struct page *page; 1162 1163 skb = queue->rx_skbs[id]; 1164 if (!skb) 1165 continue; 1166 1167 ref = queue->grant_rx_ref[id]; 1168 if (ref == GRANT_INVALID_REF) 1169 continue; 1170 1171 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 1172 1173 /* gnttab_end_foreign_access() needs a page ref until 1174 * foreign access is ended (which may be deferred). 1175 */ 1176 get_page(page); 1177 gnttab_end_foreign_access(ref, 0, 1178 (unsigned long)page_address(page)); 1179 queue->grant_rx_ref[id] = GRANT_INVALID_REF; 1180 1181 kfree_skb(skb); 1182 } 1183 1184 spin_unlock_bh(&queue->rx_lock); 1185 } 1186 1187 static netdev_features_t xennet_fix_features(struct net_device *dev, 1188 netdev_features_t features) 1189 { 1190 struct netfront_info *np = netdev_priv(dev); 1191 1192 if (features & NETIF_F_SG && 1193 !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0)) 1194 features &= ~NETIF_F_SG; 1195 1196 if (features & NETIF_F_IPV6_CSUM && 1197 !xenbus_read_unsigned(np->xbdev->otherend, 1198 "feature-ipv6-csum-offload", 0)) 1199 features &= ~NETIF_F_IPV6_CSUM; 1200 1201 if (features & NETIF_F_TSO && 1202 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0)) 1203 features &= ~NETIF_F_TSO; 1204 1205 if (features & NETIF_F_TSO6 && 1206 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0)) 1207 features &= ~NETIF_F_TSO6; 1208 1209 return features; 1210 } 1211 1212 static int xennet_set_features(struct net_device *dev, 1213 netdev_features_t features) 1214 { 1215 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { 1216 netdev_info(dev, "Reducing MTU because no SG offload"); 1217 dev->mtu = ETH_DATA_LEN; 1218 } 1219 1220 return 0; 1221 } 1222 1223 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) 1224 { 1225 struct netfront_queue *queue = dev_id; 1226 unsigned long flags; 1227 1228 spin_lock_irqsave(&queue->tx_lock, flags); 1229 xennet_tx_buf_gc(queue); 1230 spin_unlock_irqrestore(&queue->tx_lock, flags); 1231 1232 return IRQ_HANDLED; 1233 } 1234 1235 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) 1236 { 1237 struct netfront_queue *queue = dev_id; 1238 struct net_device *dev = queue->info->netdev; 1239 1240 if (likely(netif_carrier_ok(dev) && 1241 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) 1242 napi_schedule(&queue->napi); 1243 1244 return IRQ_HANDLED; 1245 } 1246 1247 static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1248 { 1249 xennet_tx_interrupt(irq, dev_id); 1250 xennet_rx_interrupt(irq, dev_id); 1251 return IRQ_HANDLED; 1252 } 1253 1254 #ifdef CONFIG_NET_POLL_CONTROLLER 1255 static void xennet_poll_controller(struct net_device *dev) 1256 { 1257 /* Poll each queue */ 1258 struct netfront_info *info = netdev_priv(dev); 1259 unsigned int num_queues = dev->real_num_tx_queues; 1260 unsigned int i; 1261 for (i = 0; i < num_queues; ++i) 1262 xennet_interrupt(0, &info->queues[i]); 1263 } 1264 #endif 1265 1266 static const struct net_device_ops xennet_netdev_ops = { 1267 .ndo_open = xennet_open, 1268 .ndo_stop = xennet_close, 1269 .ndo_start_xmit = xennet_start_xmit, 1270 .ndo_change_mtu = xennet_change_mtu, 1271 .ndo_get_stats64 = xennet_get_stats64, 1272 .ndo_set_mac_address = eth_mac_addr, 1273 .ndo_validate_addr = eth_validate_addr, 1274 .ndo_fix_features = xennet_fix_features, 1275 .ndo_set_features = xennet_set_features, 1276 .ndo_select_queue = xennet_select_queue, 1277 #ifdef CONFIG_NET_POLL_CONTROLLER 1278 .ndo_poll_controller = xennet_poll_controller, 1279 #endif 1280 }; 1281 1282 static void xennet_free_netdev(struct net_device *netdev) 1283 { 1284 struct netfront_info *np = netdev_priv(netdev); 1285 1286 free_percpu(np->rx_stats); 1287 free_percpu(np->tx_stats); 1288 free_netdev(netdev); 1289 } 1290 1291 static struct net_device *xennet_create_dev(struct xenbus_device *dev) 1292 { 1293 int err; 1294 struct net_device *netdev; 1295 struct netfront_info *np; 1296 1297 netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); 1298 if (!netdev) 1299 return ERR_PTR(-ENOMEM); 1300 1301 np = netdev_priv(netdev); 1302 np->xbdev = dev; 1303 1304 np->queues = NULL; 1305 1306 err = -ENOMEM; 1307 np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1308 if (np->rx_stats == NULL) 1309 goto exit; 1310 np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1311 if (np->tx_stats == NULL) 1312 goto exit; 1313 1314 netdev->netdev_ops = &xennet_netdev_ops; 1315 1316 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 1317 NETIF_F_GSO_ROBUST; 1318 netdev->hw_features = NETIF_F_SG | 1319 NETIF_F_IPV6_CSUM | 1320 NETIF_F_TSO | NETIF_F_TSO6; 1321 1322 /* 1323 * Assume that all hw features are available for now. This set 1324 * will be adjusted by the call to netdev_update_features() in 1325 * xennet_connect() which is the earliest point where we can 1326 * negotiate with the backend regarding supported features. 1327 */ 1328 netdev->features |= netdev->hw_features; 1329 1330 netdev->ethtool_ops = &xennet_ethtool_ops; 1331 netdev->min_mtu = ETH_MIN_MTU; 1332 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 1333 SET_NETDEV_DEV(netdev, &dev->dev); 1334 1335 np->netdev = netdev; 1336 1337 netif_carrier_off(netdev); 1338 1339 xenbus_switch_state(dev, XenbusStateInitialising); 1340 wait_event(module_wq, 1341 xenbus_read_driver_state(dev->otherend) != 1342 XenbusStateClosed && 1343 xenbus_read_driver_state(dev->otherend) != 1344 XenbusStateUnknown); 1345 return netdev; 1346 1347 exit: 1348 xennet_free_netdev(netdev); 1349 return ERR_PTR(err); 1350 } 1351 1352 /** 1353 * Entry point to this code when a new device is created. Allocate the basic 1354 * structures and the ring buffers for communication with the backend, and 1355 * inform the backend of the appropriate details for those. 1356 */ 1357 static int netfront_probe(struct xenbus_device *dev, 1358 const struct xenbus_device_id *id) 1359 { 1360 int err; 1361 struct net_device *netdev; 1362 struct netfront_info *info; 1363 1364 netdev = xennet_create_dev(dev); 1365 if (IS_ERR(netdev)) { 1366 err = PTR_ERR(netdev); 1367 xenbus_dev_fatal(dev, err, "creating netdev"); 1368 return err; 1369 } 1370 1371 info = netdev_priv(netdev); 1372 dev_set_drvdata(&dev->dev, info); 1373 #ifdef CONFIG_SYSFS 1374 info->netdev->sysfs_groups[0] = &xennet_dev_group; 1375 #endif 1376 1377 return 0; 1378 } 1379 1380 static void xennet_end_access(int ref, void *page) 1381 { 1382 /* This frees the page as a side-effect */ 1383 if (ref != GRANT_INVALID_REF) 1384 gnttab_end_foreign_access(ref, 0, (unsigned long)page); 1385 } 1386 1387 static void xennet_disconnect_backend(struct netfront_info *info) 1388 { 1389 unsigned int i = 0; 1390 unsigned int num_queues = info->netdev->real_num_tx_queues; 1391 1392 netif_carrier_off(info->netdev); 1393 1394 for (i = 0; i < num_queues && info->queues; ++i) { 1395 struct netfront_queue *queue = &info->queues[i]; 1396 1397 del_timer_sync(&queue->rx_refill_timer); 1398 1399 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1400 unbind_from_irqhandler(queue->tx_irq, queue); 1401 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 1402 unbind_from_irqhandler(queue->tx_irq, queue); 1403 unbind_from_irqhandler(queue->rx_irq, queue); 1404 } 1405 queue->tx_evtchn = queue->rx_evtchn = 0; 1406 queue->tx_irq = queue->rx_irq = 0; 1407 1408 if (netif_running(info->netdev)) 1409 napi_synchronize(&queue->napi); 1410 1411 xennet_release_tx_bufs(queue); 1412 xennet_release_rx_bufs(queue); 1413 gnttab_free_grant_references(queue->gref_tx_head); 1414 gnttab_free_grant_references(queue->gref_rx_head); 1415 1416 /* End access and free the pages */ 1417 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 1418 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 1419 1420 queue->tx_ring_ref = GRANT_INVALID_REF; 1421 queue->rx_ring_ref = GRANT_INVALID_REF; 1422 queue->tx.sring = NULL; 1423 queue->rx.sring = NULL; 1424 } 1425 } 1426 1427 /** 1428 * We are reconnecting to the backend, due to a suspend/resume, or a backend 1429 * driver restart. We tear down our netif structure and recreate it, but 1430 * leave the device-layer structures intact so that this is transparent to the 1431 * rest of the kernel. 1432 */ 1433 static int netfront_resume(struct xenbus_device *dev) 1434 { 1435 struct netfront_info *info = dev_get_drvdata(&dev->dev); 1436 1437 dev_dbg(&dev->dev, "%s\n", dev->nodename); 1438 1439 xennet_disconnect_backend(info); 1440 return 0; 1441 } 1442 1443 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) 1444 { 1445 char *s, *e, *macstr; 1446 int i; 1447 1448 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); 1449 if (IS_ERR(macstr)) 1450 return PTR_ERR(macstr); 1451 1452 for (i = 0; i < ETH_ALEN; i++) { 1453 mac[i] = simple_strtoul(s, &e, 16); 1454 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 1455 kfree(macstr); 1456 return -ENOENT; 1457 } 1458 s = e+1; 1459 } 1460 1461 kfree(macstr); 1462 return 0; 1463 } 1464 1465 static int setup_netfront_single(struct netfront_queue *queue) 1466 { 1467 int err; 1468 1469 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1470 if (err < 0) 1471 goto fail; 1472 1473 err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1474 xennet_interrupt, 1475 0, queue->info->netdev->name, queue); 1476 if (err < 0) 1477 goto bind_fail; 1478 queue->rx_evtchn = queue->tx_evtchn; 1479 queue->rx_irq = queue->tx_irq = err; 1480 1481 return 0; 1482 1483 bind_fail: 1484 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 1485 queue->tx_evtchn = 0; 1486 fail: 1487 return err; 1488 } 1489 1490 static int setup_netfront_split(struct netfront_queue *queue) 1491 { 1492 int err; 1493 1494 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1495 if (err < 0) 1496 goto fail; 1497 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); 1498 if (err < 0) 1499 goto alloc_rx_evtchn_fail; 1500 1501 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), 1502 "%s-tx", queue->name); 1503 err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1504 xennet_tx_interrupt, 1505 0, queue->tx_irq_name, queue); 1506 if (err < 0) 1507 goto bind_tx_fail; 1508 queue->tx_irq = err; 1509 1510 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), 1511 "%s-rx", queue->name); 1512 err = bind_evtchn_to_irqhandler(queue->rx_evtchn, 1513 xennet_rx_interrupt, 1514 0, queue->rx_irq_name, queue); 1515 if (err < 0) 1516 goto bind_rx_fail; 1517 queue->rx_irq = err; 1518 1519 return 0; 1520 1521 bind_rx_fail: 1522 unbind_from_irqhandler(queue->tx_irq, queue); 1523 queue->tx_irq = 0; 1524 bind_tx_fail: 1525 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); 1526 queue->rx_evtchn = 0; 1527 alloc_rx_evtchn_fail: 1528 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 1529 queue->tx_evtchn = 0; 1530 fail: 1531 return err; 1532 } 1533 1534 static int setup_netfront(struct xenbus_device *dev, 1535 struct netfront_queue *queue, unsigned int feature_split_evtchn) 1536 { 1537 struct xen_netif_tx_sring *txs; 1538 struct xen_netif_rx_sring *rxs; 1539 grant_ref_t gref; 1540 int err; 1541 1542 queue->tx_ring_ref = GRANT_INVALID_REF; 1543 queue->rx_ring_ref = GRANT_INVALID_REF; 1544 queue->rx.sring = NULL; 1545 queue->tx.sring = NULL; 1546 1547 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1548 if (!txs) { 1549 err = -ENOMEM; 1550 xenbus_dev_fatal(dev, err, "allocating tx ring page"); 1551 goto fail; 1552 } 1553 SHARED_RING_INIT(txs); 1554 FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); 1555 1556 err = xenbus_grant_ring(dev, txs, 1, &gref); 1557 if (err < 0) 1558 goto grant_tx_ring_fail; 1559 queue->tx_ring_ref = gref; 1560 1561 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1562 if (!rxs) { 1563 err = -ENOMEM; 1564 xenbus_dev_fatal(dev, err, "allocating rx ring page"); 1565 goto alloc_rx_ring_fail; 1566 } 1567 SHARED_RING_INIT(rxs); 1568 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); 1569 1570 err = xenbus_grant_ring(dev, rxs, 1, &gref); 1571 if (err < 0) 1572 goto grant_rx_ring_fail; 1573 queue->rx_ring_ref = gref; 1574 1575 if (feature_split_evtchn) 1576 err = setup_netfront_split(queue); 1577 /* setup single event channel if 1578 * a) feature-split-event-channels == 0 1579 * b) feature-split-event-channels == 1 but failed to setup 1580 */ 1581 if (!feature_split_evtchn || (feature_split_evtchn && err)) 1582 err = setup_netfront_single(queue); 1583 1584 if (err) 1585 goto alloc_evtchn_fail; 1586 1587 return 0; 1588 1589 /* If we fail to setup netfront, it is safe to just revoke access to 1590 * granted pages because backend is not accessing it at this point. 1591 */ 1592 alloc_evtchn_fail: 1593 gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); 1594 grant_rx_ring_fail: 1595 free_page((unsigned long)rxs); 1596 alloc_rx_ring_fail: 1597 gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); 1598 grant_tx_ring_fail: 1599 free_page((unsigned long)txs); 1600 fail: 1601 return err; 1602 } 1603 1604 /* Queue-specific initialisation 1605 * This used to be done in xennet_create_dev() but must now 1606 * be run per-queue. 1607 */ 1608 static int xennet_init_queue(struct netfront_queue *queue) 1609 { 1610 unsigned short i; 1611 int err = 0; 1612 char *devid; 1613 1614 spin_lock_init(&queue->tx_lock); 1615 spin_lock_init(&queue->rx_lock); 1616 1617 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); 1618 1619 devid = strrchr(queue->info->xbdev->nodename, '/') + 1; 1620 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", 1621 devid, queue->id); 1622 1623 /* Initialise tx_skbs as a free chain containing every entry. */ 1624 queue->tx_skb_freelist = 0; 1625 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1626 skb_entry_set_link(&queue->tx_skbs[i], i+1); 1627 queue->grant_tx_ref[i] = GRANT_INVALID_REF; 1628 queue->grant_tx_page[i] = NULL; 1629 } 1630 1631 /* Clear out rx_skbs */ 1632 for (i = 0; i < NET_RX_RING_SIZE; i++) { 1633 queue->rx_skbs[i] = NULL; 1634 queue->grant_rx_ref[i] = GRANT_INVALID_REF; 1635 } 1636 1637 /* A grant for every tx ring slot */ 1638 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 1639 &queue->gref_tx_head) < 0) { 1640 pr_alert("can't alloc tx grant refs\n"); 1641 err = -ENOMEM; 1642 goto exit; 1643 } 1644 1645 /* A grant for every rx ring slot */ 1646 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, 1647 &queue->gref_rx_head) < 0) { 1648 pr_alert("can't alloc rx grant refs\n"); 1649 err = -ENOMEM; 1650 goto exit_free_tx; 1651 } 1652 1653 return 0; 1654 1655 exit_free_tx: 1656 gnttab_free_grant_references(queue->gref_tx_head); 1657 exit: 1658 return err; 1659 } 1660 1661 static int write_queue_xenstore_keys(struct netfront_queue *queue, 1662 struct xenbus_transaction *xbt, int write_hierarchical) 1663 { 1664 /* Write the queue-specific keys into XenStore in the traditional 1665 * way for a single queue, or in a queue subkeys for multiple 1666 * queues. 1667 */ 1668 struct xenbus_device *dev = queue->info->xbdev; 1669 int err; 1670 const char *message; 1671 char *path; 1672 size_t pathsize; 1673 1674 /* Choose the correct place to write the keys */ 1675 if (write_hierarchical) { 1676 pathsize = strlen(dev->nodename) + 10; 1677 path = kzalloc(pathsize, GFP_KERNEL); 1678 if (!path) { 1679 err = -ENOMEM; 1680 message = "out of memory while writing ring references"; 1681 goto error; 1682 } 1683 snprintf(path, pathsize, "%s/queue-%u", 1684 dev->nodename, queue->id); 1685 } else { 1686 path = (char *)dev->nodename; 1687 } 1688 1689 /* Write ring references */ 1690 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", 1691 queue->tx_ring_ref); 1692 if (err) { 1693 message = "writing tx-ring-ref"; 1694 goto error; 1695 } 1696 1697 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", 1698 queue->rx_ring_ref); 1699 if (err) { 1700 message = "writing rx-ring-ref"; 1701 goto error; 1702 } 1703 1704 /* Write event channels; taking into account both shared 1705 * and split event channel scenarios. 1706 */ 1707 if (queue->tx_evtchn == queue->rx_evtchn) { 1708 /* Shared event channel */ 1709 err = xenbus_printf(*xbt, path, 1710 "event-channel", "%u", queue->tx_evtchn); 1711 if (err) { 1712 message = "writing event-channel"; 1713 goto error; 1714 } 1715 } else { 1716 /* Split event channels */ 1717 err = xenbus_printf(*xbt, path, 1718 "event-channel-tx", "%u", queue->tx_evtchn); 1719 if (err) { 1720 message = "writing event-channel-tx"; 1721 goto error; 1722 } 1723 1724 err = xenbus_printf(*xbt, path, 1725 "event-channel-rx", "%u", queue->rx_evtchn); 1726 if (err) { 1727 message = "writing event-channel-rx"; 1728 goto error; 1729 } 1730 } 1731 1732 if (write_hierarchical) 1733 kfree(path); 1734 return 0; 1735 1736 error: 1737 if (write_hierarchical) 1738 kfree(path); 1739 xenbus_dev_fatal(dev, err, "%s", message); 1740 return err; 1741 } 1742 1743 static void xennet_destroy_queues(struct netfront_info *info) 1744 { 1745 unsigned int i; 1746 1747 for (i = 0; i < info->netdev->real_num_tx_queues; i++) { 1748 struct netfront_queue *queue = &info->queues[i]; 1749 1750 if (netif_running(info->netdev)) 1751 napi_disable(&queue->napi); 1752 netif_napi_del(&queue->napi); 1753 } 1754 1755 kfree(info->queues); 1756 info->queues = NULL; 1757 } 1758 1759 static int xennet_create_queues(struct netfront_info *info, 1760 unsigned int *num_queues) 1761 { 1762 unsigned int i; 1763 int ret; 1764 1765 info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue), 1766 GFP_KERNEL); 1767 if (!info->queues) 1768 return -ENOMEM; 1769 1770 for (i = 0; i < *num_queues; i++) { 1771 struct netfront_queue *queue = &info->queues[i]; 1772 1773 queue->id = i; 1774 queue->info = info; 1775 1776 ret = xennet_init_queue(queue); 1777 if (ret < 0) { 1778 dev_warn(&info->xbdev->dev, 1779 "only created %d queues\n", i); 1780 *num_queues = i; 1781 break; 1782 } 1783 1784 netif_napi_add(queue->info->netdev, &queue->napi, 1785 xennet_poll, 64); 1786 if (netif_running(info->netdev)) 1787 napi_enable(&queue->napi); 1788 } 1789 1790 netif_set_real_num_tx_queues(info->netdev, *num_queues); 1791 1792 if (*num_queues == 0) { 1793 dev_err(&info->xbdev->dev, "no queues\n"); 1794 return -EINVAL; 1795 } 1796 return 0; 1797 } 1798 1799 /* Common code used when first setting up, and when resuming. */ 1800 static int talk_to_netback(struct xenbus_device *dev, 1801 struct netfront_info *info) 1802 { 1803 const char *message; 1804 struct xenbus_transaction xbt; 1805 int err; 1806 unsigned int feature_split_evtchn; 1807 unsigned int i = 0; 1808 unsigned int max_queues = 0; 1809 struct netfront_queue *queue = NULL; 1810 unsigned int num_queues = 1; 1811 1812 info->netdev->irq = 0; 1813 1814 /* Check if backend supports multiple queues */ 1815 max_queues = xenbus_read_unsigned(info->xbdev->otherend, 1816 "multi-queue-max-queues", 1); 1817 num_queues = min(max_queues, xennet_max_queues); 1818 1819 /* Check feature-split-event-channels */ 1820 feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, 1821 "feature-split-event-channels", 0); 1822 1823 /* Read mac addr. */ 1824 err = xen_net_read_mac(dev, info->netdev->dev_addr); 1825 if (err) { 1826 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 1827 goto out_unlocked; 1828 } 1829 1830 rtnl_lock(); 1831 if (info->queues) 1832 xennet_destroy_queues(info); 1833 1834 err = xennet_create_queues(info, &num_queues); 1835 if (err < 0) { 1836 xenbus_dev_fatal(dev, err, "creating queues"); 1837 kfree(info->queues); 1838 info->queues = NULL; 1839 goto out; 1840 } 1841 rtnl_unlock(); 1842 1843 /* Create shared ring, alloc event channel -- for each queue */ 1844 for (i = 0; i < num_queues; ++i) { 1845 queue = &info->queues[i]; 1846 err = setup_netfront(dev, queue, feature_split_evtchn); 1847 if (err) 1848 goto destroy_ring; 1849 } 1850 1851 again: 1852 err = xenbus_transaction_start(&xbt); 1853 if (err) { 1854 xenbus_dev_fatal(dev, err, "starting transaction"); 1855 goto destroy_ring; 1856 } 1857 1858 if (xenbus_exists(XBT_NIL, 1859 info->xbdev->otherend, "multi-queue-max-queues")) { 1860 /* Write the number of queues */ 1861 err = xenbus_printf(xbt, dev->nodename, 1862 "multi-queue-num-queues", "%u", num_queues); 1863 if (err) { 1864 message = "writing multi-queue-num-queues"; 1865 goto abort_transaction_no_dev_fatal; 1866 } 1867 } 1868 1869 if (num_queues == 1) { 1870 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ 1871 if (err) 1872 goto abort_transaction_no_dev_fatal; 1873 } else { 1874 /* Write the keys for each queue */ 1875 for (i = 0; i < num_queues; ++i) { 1876 queue = &info->queues[i]; 1877 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ 1878 if (err) 1879 goto abort_transaction_no_dev_fatal; 1880 } 1881 } 1882 1883 /* The remaining keys are not queue-specific */ 1884 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1885 1); 1886 if (err) { 1887 message = "writing request-rx-copy"; 1888 goto abort_transaction; 1889 } 1890 1891 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); 1892 if (err) { 1893 message = "writing feature-rx-notify"; 1894 goto abort_transaction; 1895 } 1896 1897 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); 1898 if (err) { 1899 message = "writing feature-sg"; 1900 goto abort_transaction; 1901 } 1902 1903 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); 1904 if (err) { 1905 message = "writing feature-gso-tcpv4"; 1906 goto abort_transaction; 1907 } 1908 1909 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); 1910 if (err) { 1911 message = "writing feature-gso-tcpv6"; 1912 goto abort_transaction; 1913 } 1914 1915 err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", 1916 "1"); 1917 if (err) { 1918 message = "writing feature-ipv6-csum-offload"; 1919 goto abort_transaction; 1920 } 1921 1922 err = xenbus_transaction_end(xbt, 0); 1923 if (err) { 1924 if (err == -EAGAIN) 1925 goto again; 1926 xenbus_dev_fatal(dev, err, "completing transaction"); 1927 goto destroy_ring; 1928 } 1929 1930 return 0; 1931 1932 abort_transaction: 1933 xenbus_dev_fatal(dev, err, "%s", message); 1934 abort_transaction_no_dev_fatal: 1935 xenbus_transaction_end(xbt, 1); 1936 destroy_ring: 1937 xennet_disconnect_backend(info); 1938 rtnl_lock(); 1939 xennet_destroy_queues(info); 1940 out: 1941 rtnl_unlock(); 1942 out_unlocked: 1943 device_unregister(&dev->dev); 1944 return err; 1945 } 1946 1947 static int xennet_connect(struct net_device *dev) 1948 { 1949 struct netfront_info *np = netdev_priv(dev); 1950 unsigned int num_queues = 0; 1951 int err; 1952 unsigned int j = 0; 1953 struct netfront_queue *queue = NULL; 1954 1955 if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { 1956 dev_info(&dev->dev, 1957 "backend does not support copying receive path\n"); 1958 return -ENODEV; 1959 } 1960 1961 err = talk_to_netback(np->xbdev, np); 1962 if (err) 1963 return err; 1964 1965 /* talk_to_netback() sets the correct number of queues */ 1966 num_queues = dev->real_num_tx_queues; 1967 1968 if (dev->reg_state == NETREG_UNINITIALIZED) { 1969 err = register_netdev(dev); 1970 if (err) { 1971 pr_warn("%s: register_netdev err=%d\n", __func__, err); 1972 device_unregister(&np->xbdev->dev); 1973 return err; 1974 } 1975 } 1976 1977 rtnl_lock(); 1978 netdev_update_features(dev); 1979 rtnl_unlock(); 1980 1981 /* 1982 * All public and private state should now be sane. Get 1983 * ready to start sending and receiving packets and give the driver 1984 * domain a kick because we've probably just requeued some 1985 * packets. 1986 */ 1987 netif_carrier_on(np->netdev); 1988 for (j = 0; j < num_queues; ++j) { 1989 queue = &np->queues[j]; 1990 1991 notify_remote_via_irq(queue->tx_irq); 1992 if (queue->tx_irq != queue->rx_irq) 1993 notify_remote_via_irq(queue->rx_irq); 1994 1995 spin_lock_irq(&queue->tx_lock); 1996 xennet_tx_buf_gc(queue); 1997 spin_unlock_irq(&queue->tx_lock); 1998 1999 spin_lock_bh(&queue->rx_lock); 2000 xennet_alloc_rx_buffers(queue); 2001 spin_unlock_bh(&queue->rx_lock); 2002 } 2003 2004 return 0; 2005 } 2006 2007 /** 2008 * Callback received when the backend's state changes. 2009 */ 2010 static void netback_changed(struct xenbus_device *dev, 2011 enum xenbus_state backend_state) 2012 { 2013 struct netfront_info *np = dev_get_drvdata(&dev->dev); 2014 struct net_device *netdev = np->netdev; 2015 2016 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 2017 2018 wake_up_all(&module_wq); 2019 2020 switch (backend_state) { 2021 case XenbusStateInitialising: 2022 case XenbusStateInitialised: 2023 case XenbusStateReconfiguring: 2024 case XenbusStateReconfigured: 2025 case XenbusStateUnknown: 2026 break; 2027 2028 case XenbusStateInitWait: 2029 if (dev->state != XenbusStateInitialising) 2030 break; 2031 if (xennet_connect(netdev) != 0) 2032 break; 2033 xenbus_switch_state(dev, XenbusStateConnected); 2034 break; 2035 2036 case XenbusStateConnected: 2037 netdev_notify_peers(netdev); 2038 break; 2039 2040 case XenbusStateClosed: 2041 if (dev->state == XenbusStateClosed) 2042 break; 2043 /* Missed the backend's CLOSING state -- fallthrough */ 2044 case XenbusStateClosing: 2045 xenbus_frontend_closed(dev); 2046 break; 2047 } 2048 } 2049 2050 static const struct xennet_stat { 2051 char name[ETH_GSTRING_LEN]; 2052 u16 offset; 2053 } xennet_stats[] = { 2054 { 2055 "rx_gso_checksum_fixup", 2056 offsetof(struct netfront_info, rx_gso_checksum_fixup) 2057 }, 2058 }; 2059 2060 static int xennet_get_sset_count(struct net_device *dev, int string_set) 2061 { 2062 switch (string_set) { 2063 case ETH_SS_STATS: 2064 return ARRAY_SIZE(xennet_stats); 2065 default: 2066 return -EINVAL; 2067 } 2068 } 2069 2070 static void xennet_get_ethtool_stats(struct net_device *dev, 2071 struct ethtool_stats *stats, u64 * data) 2072 { 2073 void *np = netdev_priv(dev); 2074 int i; 2075 2076 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2077 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); 2078 } 2079 2080 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2081 { 2082 int i; 2083 2084 switch (stringset) { 2085 case ETH_SS_STATS: 2086 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2087 memcpy(data + i * ETH_GSTRING_LEN, 2088 xennet_stats[i].name, ETH_GSTRING_LEN); 2089 break; 2090 } 2091 } 2092 2093 static const struct ethtool_ops xennet_ethtool_ops = 2094 { 2095 .get_link = ethtool_op_get_link, 2096 2097 .get_sset_count = xennet_get_sset_count, 2098 .get_ethtool_stats = xennet_get_ethtool_stats, 2099 .get_strings = xennet_get_strings, 2100 }; 2101 2102 #ifdef CONFIG_SYSFS 2103 static ssize_t show_rxbuf(struct device *dev, 2104 struct device_attribute *attr, char *buf) 2105 { 2106 return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); 2107 } 2108 2109 static ssize_t store_rxbuf(struct device *dev, 2110 struct device_attribute *attr, 2111 const char *buf, size_t len) 2112 { 2113 char *endp; 2114 unsigned long target; 2115 2116 if (!capable(CAP_NET_ADMIN)) 2117 return -EPERM; 2118 2119 target = simple_strtoul(buf, &endp, 0); 2120 if (endp == buf) 2121 return -EBADMSG; 2122 2123 /* rxbuf_min and rxbuf_max are no longer configurable. */ 2124 2125 return len; 2126 } 2127 2128 static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf); 2129 static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf); 2130 static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL); 2131 2132 static struct attribute *xennet_dev_attrs[] = { 2133 &dev_attr_rxbuf_min.attr, 2134 &dev_attr_rxbuf_max.attr, 2135 &dev_attr_rxbuf_cur.attr, 2136 NULL 2137 }; 2138 2139 static const struct attribute_group xennet_dev_group = { 2140 .attrs = xennet_dev_attrs 2141 }; 2142 #endif /* CONFIG_SYSFS */ 2143 2144 static int xennet_remove(struct xenbus_device *dev) 2145 { 2146 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2147 2148 dev_dbg(&dev->dev, "%s\n", dev->nodename); 2149 2150 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { 2151 xenbus_switch_state(dev, XenbusStateClosing); 2152 wait_event(module_wq, 2153 xenbus_read_driver_state(dev->otherend) == 2154 XenbusStateClosing || 2155 xenbus_read_driver_state(dev->otherend) == 2156 XenbusStateUnknown); 2157 2158 xenbus_switch_state(dev, XenbusStateClosed); 2159 wait_event(module_wq, 2160 xenbus_read_driver_state(dev->otherend) == 2161 XenbusStateClosed || 2162 xenbus_read_driver_state(dev->otherend) == 2163 XenbusStateUnknown); 2164 } 2165 2166 xennet_disconnect_backend(info); 2167 2168 if (info->netdev->reg_state == NETREG_REGISTERED) 2169 unregister_netdev(info->netdev); 2170 2171 if (info->queues) { 2172 rtnl_lock(); 2173 xennet_destroy_queues(info); 2174 rtnl_unlock(); 2175 } 2176 xennet_free_netdev(info->netdev); 2177 2178 return 0; 2179 } 2180 2181 static const struct xenbus_device_id netfront_ids[] = { 2182 { "vif" }, 2183 { "" } 2184 }; 2185 2186 static struct xenbus_driver netfront_driver = { 2187 .ids = netfront_ids, 2188 .probe = netfront_probe, 2189 .remove = xennet_remove, 2190 .resume = netfront_resume, 2191 .otherend_changed = netback_changed, 2192 }; 2193 2194 static int __init netif_init(void) 2195 { 2196 if (!xen_domain()) 2197 return -ENODEV; 2198 2199 if (!xen_has_pv_nic_devices()) 2200 return -ENODEV; 2201 2202 pr_info("Initialising Xen virtual ethernet driver\n"); 2203 2204 /* Allow as many queues as there are CPUs inut max. 8 if user has not 2205 * specified a value. 2206 */ 2207 if (xennet_max_queues == 0) 2208 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, 2209 num_online_cpus()); 2210 2211 return xenbus_register_frontend(&netfront_driver); 2212 } 2213 module_init(netif_init); 2214 2215 2216 static void __exit netif_exit(void) 2217 { 2218 xenbus_unregister_driver(&netfront_driver); 2219 } 2220 module_exit(netif_exit); 2221 2222 MODULE_DESCRIPTION("Xen virtual network device frontend"); 2223 MODULE_LICENSE("GPL"); 2224 MODULE_ALIAS("xen:vif"); 2225 MODULE_ALIAS("xennet"); 2226