/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <net/ip.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	struct page *page;
	unsigned offset;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	unsigned int evtchn;
	struct xenbus_device *xbdev;

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Access functions for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	list[id].link = *head;
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	netif_rx_schedule(dev, &np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
}

static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}

static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	xennet_maybe_wake_tx(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return 0;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}

static int skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}

static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		struct page *page = NETFRONT_SKB_CB(skb)->page;
		void *vaddr = page_address(page);
		unsigned offset = NETFRONT_SKB_CB(skb)->offset;

		memcpy(skb->data, vaddr + offset,
		       skb_headlen(skb));

		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks at the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb_checksum_setup(skb)) {
				kfree_skb(skb);
				packets_dropped++;
				dev->stats.rx_errors++;
				continue;
			}
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* Pass it up. */
		netif_receive_skb(skb);
		dev->last_rx = jiffies;
	}

	return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.)  On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	while ((skb = __skb_dequeue(&errq)))
		kfree_skb(skb);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev, napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	while ((skb = __skb_dequeue(&free_list)) != NULL)
		dev_kfree_skb(skb);

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		np->tx_skbs[i].link = i+1;
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->open            = xennet_open;
	netdev->hard_start_xmit = xennet_start_xmit;
	netdev->stop            = xennet_close;
	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->uninit          = xennet_uninit;
	netdev->change_mtu      = xennet_change_mtu;
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev->dev.driver_data = info;

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev->dev.driver_data = NULL;
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					IRQF_SAMPLE_RANDOM, netdev->name,
					netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}

static int xennet_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	} else if (dev->mtu > ETH_DATA_LEN)
		dev->mtu = ETH_DATA_LEN;

	return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
}

static void xennet_set_features(struct net_device *dev)
{
	/* Turn off all GSO bits except ROBUST. */
	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
	dev->features |= NETIF_F_GSO_ROBUST;
	xennet_set_sg(dev, 0);

	/* We need checksum offload to enable scatter/gather and TSO. */
	if (!(dev->features & NETIF_F_IP_CSUM))
		return;

	if (!xennet_set_sg(dev, 1))
		xennet_set_tso(dev, 1);
}

static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_backend(np->xbdev, np);
	if (err)
		return err;

	xennet_set_features(dev);

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
					       frags->page)),
			0);
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev->dev.driver_data;
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct ethtool_ops xennet_ethtool_ops =
{
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = xennet_set_sg,
	.set_tso = xennet_set_tso,
	.get_link = ethtool_op_get_link,
};

#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	unregister_netdev(info->netdev);

	xennet_disconnect_backend(info);

	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);

	free_netdev(info->netdev);

	return 0;
}

static struct xenbus_driver netfront = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};

static int __init netif_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	if (is_initial_xendomain())
		return 0;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront);
}
module_init(netif_init);


static void __exit netif_exit(void)
{
	if (is_initial_xendomain())
		return;

	return xenbus_unregister_driver(&netfront);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");