/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <asm/xen/page.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)

struct netfront_stats {
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */

	struct xenbus_device *xbdev;

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];

	/* Statistics */
	struct netfront_stats __percpu *stats;

	unsigned long rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}

/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
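 *
 * add_id_to_freelist() pushes a free slot index onto the chain headed by
 * tx_skb_freelist, storing the previous head in skb_entry.link;
 * get_id_from_freelist() pops one. Because these small indices are below
 * PAGE_OFFSET while live skb pointers are not, skb_entry_is_link() can
 * tell a free slot from an in-use one.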
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	napi_schedule(&np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}

static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}

static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align ip header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile?
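	 * If we have accumulated fewer than half the target, defer the
	 * refill; but if requests queued by a previous pass are still
	 * waiting in the ring (req_prod ahead of sring->req_prod), push
	 * those now rather than leaving them unannounced.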
	 */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb(); /* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
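		 *
		 * The formula below re-arms rsp_event at the point halfway
		 * between the responses just consumed (prod) and the requests
		 * still outstanding (req_prod), so the backend notifies us
		 * again once roughly half of the in-flight packets have
		 * completed.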
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb(); /* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		struct page *page = skb_frag_page(frag);

		len = skb_frag_size(frag);
		offset = frag->page_offset;

		/* Data must not cross a page boundary. */
		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));

		/* Skip unused frames from start of page */
		page += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		while (len > 0) {
			unsigned long bytes;

			BUG_ON(offset >= PAGE_SIZE);

			bytes = PAGE_SIZE - offset;
			if (bytes > len)
				bytes = len;

			tx->flags |= XEN_NETTXF_more_data;

			id = get_id_from_freelist(&np->tx_skb_freelist,
						  np->tx_skbs);
			np->tx_skbs[id].skb = skb_get(skb);
			tx = RING_GET_REQUEST(&np->tx, prod++);
			tx->id = id;
			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
			BUG_ON((signed short)ref < 0);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
							np->xbdev->otherend_id,
							mfn, GNTMAP_readonly);

			tx->gref = np->grant_tx_ref[id] = ref;
			tx->offset = offset;
			tx->size = bytes;
			tx->flags = 0;

			offset += bytes;
			len -= bytes;

			/* Next frame */
			if (offset == PAGE_SIZE && len) {
				BUG_ON(!PageCompound(page));
				page++;
				offset = 0;
			}
		}
	}

	np->tx.req_prod_pvt = prod;
}

/*
 * Count how many ring slots are required to send the frags of this
 * skb. Each frag might be a compound page.
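 *
 * For example, with 4 KiB pages a 5000-byte frag that starts 100 bytes
 * into its first page covers bytes 100..5099 of that page range and
 * therefore needs PFN_UP(100 + 5000) = 2 slots.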
 */
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		pages += PFN_UP(offset + size);
	}

	return pages;
}

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int slots;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
		xennet_count_skb_frag_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_alert_ratelimited(
			"xennet: skb rides the rocket: %d slots\n", slots);
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->tx_irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()!
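	 * The garbage collector below consumes TX responses and may call
	 * dev_kfree_skb_irq() on this very skb, which is why the byte and
	 * packet counters were updated above.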
	 */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
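		 *
		 * A GRANT_INVALID_REF here means the backend returned an
		 * rx->id for a slot we never posted (or have already
		 * reclaimed), so there is no grant to end.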
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
		skb = xennet_get_rx_skb(np, cons + slots);
		ref = xennet_get_rx_ref(np, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	struct iphdr *iph;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		np->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup.
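	 * Either the backend already validated it (CHECKSUM_UNNECESSARY)
	 * or the stack will verify the checksum in software.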
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;

	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct tcphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = tcp_hdr(skb);
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct udphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct udphdr *udph = udp_hdr(skb);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
			       iph->protocol);
		goto out;
	}

	err = 0;

out:
	return err;
}

static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (checksum_setup(dev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			dev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		/* Pass it up. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(np, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ?
		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);

			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			const struct page *page =
				skb_frag_page(&skb_shinfo(skb)->frags[0]);
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates.
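			 * rx_mcl[] has collected one update_va_mapping call
			 * per recovered page; the trailing mmu_update batch
			 * and the single HYPERVISOR_multicall below flush
			 * them all in one hypercall.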
			 */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	return features;
}

static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);
	xennet_tx_buf_gc(dev);
	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
		napi_schedule(&np->napi);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	xennet_interrupt(0, dev);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open = xennet_open,
	.ndo_uninit = xennet_uninit,
	.ndo_stop = xennet_close,
	.ndo_start_xmit = xennet_start_xmit,
	.ndo_change_mtu = xennet_change_mtu,
	.ndo_get_stats64 = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_fix_features = xennet_fix_features,
	.ndo_set_features = xennet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	err = -ENOMEM;
	np->stats = alloc_percpu(struct netfront_stats);
	if (np->stats == NULL)
		goto exit;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit_free_stats;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops = &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit_free_stats:
	free_percpu(np->stats);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
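 *
 * The ring and event-channel setup itself happens later, in
 * talk_to_netback(), which runs from netback_changed() once the backend
 * reaches XenbusStateInitWait.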
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		pr_warn("%s: register_netdev err=%d\n", __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->tx_irq && (info->tx_irq == info->rx_irq))
		unbind_from_irqhandler(info->tx_irq, info);
	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
		unbind_from_irqhandler(info->tx_irq, info);
		unbind_from_irqhandler(info->rx_irq, info);
	}
	info->tx_evtchn = info->rx_evtchn = 0;
	info->tx_irq = info->rx_irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

static int setup_netfront_single(struct netfront_info *info)
{
	int err;

	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
					xennet_interrupt,
					0, info->netdev->name, info);
	if (err < 0)
		goto bind_fail;
	info->rx_evtchn = info->tx_evtchn;
	info->rx_irq = info->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
	info->tx_evtchn = 0;
fail:
	return err;
}

static int setup_netfront_split(struct netfront_info *info)
{
	int err;

	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
		 "%s-tx", info->netdev->name);
	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
					xennet_tx_interrupt,
					0, info->tx_irq_name, info);
	if (err < 0)
		goto bind_tx_fail;
	info->tx_irq = err;

	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
		 "%s-rx", info->netdev->name);
	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
					xennet_rx_interrupt,
					0, info->rx_irq_name, info);
	if (err < 0)
		goto bind_rx_fail;
	info->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(info->tx_irq, info);
	info->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
	info->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
	info->tx_evtchn = 0;
fail:
	return err;
}

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;
	unsigned int feature_split_evtchn;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-split-event-channels", "%u",
			   &feature_split_evtchn);
	if (err < 0)
		feature_split_evtchn = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0)
		goto grant_tx_ring_fail;

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0)
		goto grant_rx_ring_fail;
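	/* On success xenbus_grant_ring() returns the grant reference handed
	 * to the backend; store it as the rx ring ref, as was done for tx
	 * above. */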
	info->rx_ring_ref = err;

	if (feature_split_evtchn)
		err = setup_netfront_split(info);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(info);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}

	if (info->tx_evtchn == info->rx_evtchn) {
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel", "%u", info->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto abort_transaction;
		}
	} else {
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel-tx", "%u", info->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto abort_transaction;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel-rx", "%u", info->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto abort_transaction;
		}
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}

static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		skb_frag_t *frag;
		const struct page *page;
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		frag = &skb_shinfo(skb)->frags[0];
		page = skb_frag_page(frag);
		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(page)),
			0);
		req->gref = ref;
		req->id = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->tx_irq);
	if (np->tx_irq != np->rx_irq)
		notify_remote_via_irq(np->rx_irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}

/**
 * Callback received when the backend's state changes.
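 *
 * XenbusStateInitWait from a freshly-initialising backend triggers
 * xennet_connect() and moves the frontend to Connected; Closing is
 * acknowledged via xenbus_frontend_closed().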
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};

#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	xennet_sysfs_delif(info->netdev);

	unregister_netdev(info->netdev);

	del_timer_sync(&info->rx_refill_timer);

	free_percpu(info->stats);

	free_netdev(info->netdev);

	return 0;
}

static DEFINE_XENBUS_DRIVER(netfront, ,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
);

static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);


static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");