/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <net/ip.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	struct page *page;
	unsigned offset;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	unsigned int evtchn;
	struct xenbus_device *xbdev;

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	list[id].link = *head;
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
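
/*
 * A minimal sketch of a predicate built on the PAGE_OFFSET property
 * described above: freelist links are small ring indices, so they are
 * always below PAGE_OFFSET, while skb pointers are kernel virtual
 * addresses at or above it.  The driver open-codes this same test in
 * xennet_release_tx_bufs(); the helper below is purely illustrative
 * and is not used elsewhere in this file.
 */
static inline int skb_entry_is_link(const union skb_entry *entry)
{
	/* Mirrors the open-coded check in xennet_release_tx_bufs(). */
	return (unsigned long)entry->skb < PAGE_OFFSET;
}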

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	netif_rx_schedule(dev, &np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
}

static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}

static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation
	 * requests both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
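
/*
 * The fill target above adapts to demand: xennet_alloc_rx_buffers()
 * doubles rx_target (up to rx_max_target) whenever fewer than a quarter
 * of the posted requests are still outstanding, while xennet_poll()
 * decrements it (down to rx_min_target) when a poll completes with more
 * than three quarters of the posted buffers still unused -- exponential
 * increase, linear decrease.  With the defaults set in
 * xennet_create_dev() (a 64-buffer starting target, 256 maximum, both
 * illustrative here), two consecutive near-underruns push the target
 * from 64 to 128 and then 256, after which it drifts back down one
 * buffer per quiet poll.  The bounds are tunable through the
 * rxbuf_min/rxbuf_max sysfs attributes defined at the bottom of this
 * file.
 */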

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	xennet_maybe_wake_tx(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}
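
/*
 * Worked example (illustrative numbers): a linear header of 5000 bytes
 * starting at offset 3000 into its page, with 4096-byte pages, does not
 * fit the slot prepared by xennet_start_xmit().  The loop above trims
 * that first request to 4096 - 3000 = 1096 bytes, then emits a second
 * request covering the remaining 3904 bytes at offset 0 of the next
 * page.  Each skb fragment then consumes one further request, which is
 * why xennet_start_xmit() estimates the slot count as nr_frags plus
 * (offset + len + PAGE_SIZE - 1) / PAGE_SIZE before committing the
 * packet.
 */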

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return 0;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}

static int skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
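
/*
 * skb_checksum_setup() runs after eth_type_trans() has pulled the
 * Ethernet header in handle_incoming_queue(), so skb->data points at
 * the IP header.  For a typical IPv4 packet with ihl == 5 the transport
 * header starts 20 bytes in, giving csum_start = (skb->data + 20) -
 * skb->head, and csum_offset is 16 for TCP (offsetof(struct tcphdr,
 * check)) or 6 for UDP (offsetof(struct udphdr, check)).  The numbers
 * are illustrative, for an options-free header.
 */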

static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		struct page *page = NETFRONT_SKB_CB(skb)->page;
		void *vaddr = page_address(page);
		unsigned offset = NETFRONT_SKB_CB(skb)->offset;

		memcpy(skb->data, vaddr + offset,
		       skb_headlen(skb));

		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb_checksum_setup(skb)) {
				kfree_skb(skb);
				packets_dropped++;
				dev->stats.rx_errors++;
				continue;
			}
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* Pass it up. */
		netif_receive_skb(skb);
		dev->last_rx = jiffies;
	}

	return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.) On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev, napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		np->tx_skbs[i].link = i+1;
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->open            = xennet_open;
	netdev->hard_start_xmit = xennet_start_xmit;
	netdev->stop            = xennet_close;
	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->uninit          = xennet_uninit;
	netdev->change_mtu      = xennet_change_mtu;
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev->dev.driver_data = info;

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev->dev.driver_data = NULL;
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
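
/*
 * The "mac" node read above is expected to hold six colon-separated hex
 * octets, e.g. "00:16:3e:12:34:56" (the value is only an example;
 * 00:16:3e is the prefix conventionally used for Xen guests).  Anything
 * that does not parse as exactly ETH_ALEN such octets makes
 * xen_net_read_mac() return -ENOENT.
 */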

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					IRQF_SAMPLE_RANDOM, netdev->name,
					netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}
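
/*
 * After a successful transaction the frontend's xenstore directory
 * (dev->nodename, typically something like
 * /local/domain/<domid>/device/vif/<handle> -- the path shown is the
 * conventional layout and only illustrative) ends up holding roughly:
 *
 *	tx-ring-ref       = "<grant reference of the tx shared ring>"
 *	rx-ring-ref       = "<grant reference of the rx shared ring>"
 *	event-channel     = "<event channel port>"
 *	request-rx-copy   = "1"
 *	feature-rx-notify = "1"
 *	feature-sg        = "1"
 *	feature-gso-tcpv4 = "1"
 *
 * The backend reads these keys when it connects.
 */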

static int xennet_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	} else if (dev->mtu > ETH_DATA_LEN)
		dev->mtu = ETH_DATA_LEN;

	return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
}

static void xennet_set_features(struct net_device *dev)
{
	/* Turn off all GSO bits except ROBUST. */
	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
	dev->features |= NETIF_F_GSO_ROBUST;
	xennet_set_sg(dev, 0);

	/* We need checksum offload to enable scatter/gather and TSO. */
	if (!(dev->features & NETIF_F_IP_CSUM))
		return;

	if (!xennet_set_sg(dev, 1))
		xennet_set_tso(dev, 1);
}

static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_backend(np->xbdev, np);
	if (err)
		return err;

	xennet_set_features(dev);

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
					       frags->page)),
			0);
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev->dev.driver_data;
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct ethtool_ops xennet_ethtool_ops =
{
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = xennet_set_sg,
	.set_tso = xennet_set_tso,
	.get_link = ethtool_op_get_link,
};

#ifdef CONFIG_SYSFS
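
/*
 * These attributes are created on the net device's struct device, so
 * they normally appear under the interface's sysfs directory; the
 * receive-buffer targets can then be tuned at run time with something
 * like "echo 128 > /sys/class/net/eth0/rxbuf_min" (interface name
 * illustrative).  rxbuf_cur is read-only and reports the current
 * adaptive target.
 */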

static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	unregister_netdev(info->netdev);

	xennet_disconnect_backend(info);

	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);

	free_netdev(info->netdev);

	return 0;
}

static struct xenbus_driver netfront = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};

static int __init netif_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	if (is_initial_xendomain())
		return 0;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront);
}
module_init(netif_init);


static void __exit netif_exit(void)
{
	if (is_initial_xendomain())
		return;

	xenbus_unregister_driver(&netfront);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");