/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128

struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* The skb we couldn't send because buffers were full. */
        struct sk_buff *last_xmit_skb;

        /* If we need to free in a timer, this is it. */
        struct timer_list xmit_free_timer;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* For cleaning up after transmission. */
        struct tasklet_struct tasklet;
        bool free_in_tasklet;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_hdr *)skb->cb;
}

static void give_a_page(struct virtnet_info *vi, struct page *page)
{
        page->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                give_a_page(vi, skb_shinfo(skb)->frags[i].page);
        skb_shinfo(skb)->nr_frags = 0;
        skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p)
                vi->pages = (struct page *)p->private;
        else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);

        /* Make sure we re-xmit last_xmit_skb: if there are no more packets
         * queued, start_xmit won't be called. */
        tasklet_schedule(&vi->tasklet);
}
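/*
 * For reference: receive_skb() below parses the per-packet header that the
 * host prepends to every received frame.  The layouts are paraphrased here
 * from <linux/virtio_net.h>, which remains the authoritative definition:
 *
 *	struct virtio_net_hdr {
 *		__u8  flags;		// e.g. VIRTIO_NET_HDR_F_NEEDS_CSUM
 *		__u8  gso_type;		// VIRTIO_NET_HDR_GSO_* (may be ORed with _ECN)
 *		__u16 hdr_len;		// Ethernet + IP + tcp/udp header length
 *		__u16 gso_size;		// bytes per segment after the headers
 *		__u16 csum_start;	// where checksumming starts
 *		__u16 csum_offset;	// checksum location, relative to csum_start
 *	};
 *
 *	struct virtio_net_hdr_mrg_rxbuf {
 *		struct virtio_net_hdr hdr;
 *		__u16 num_buffers;	// receive buffers consumed by this packet
 *	};
 *
 * When VIRTIO_NET_F_MRG_RXBUF has been negotiated the host uses the
 * _mrg_rxbuf form and may spread one packet over num_buffers receive
 * buffers; receive_skb() stitches those back together as page fragments.
 */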
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
        int err;
        int i;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }

        if (vi->mergeable_rx_bufs) {
                struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
                unsigned int copy;
                char *p = page_address(skb_shinfo(skb)->frags[0].page);

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;
                len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

                memcpy(hdr, p, sizeof(*mhdr));
                p += sizeof(*mhdr);

                copy = len;
                if (copy > skb_tailroom(skb))
                        copy = skb_tailroom(skb);

                memcpy(skb_put(skb, copy), p, copy);

                len -= copy;

                if (!len) {
                        give_a_page(vi, skb_shinfo(skb)->frags[0].page);
                        skb_shinfo(skb)->nr_frags--;
                } else {
                        skb_shinfo(skb)->frags[0].page_offset +=
                                sizeof(*mhdr) + copy;
                        skb_shinfo(skb)->frags[0].size = len;
                        skb->data_len += len;
                        skb->len += len;
                }

                while (--mhdr->num_buffers) {
                        struct sk_buff *nskb;

                        i = skb_shinfo(skb)->nr_frags;
                        if (i >= MAX_SKB_FRAGS) {
                                pr_debug("%s: packet too long %d\n", dev->name,
                                         len);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                        if (!nskb) {
                                pr_debug("%s: rx error: %d buffers missing\n",
                                         dev->name, mhdr->num_buffers);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        __skb_unlink(nskb, &vi->recv);
                        vi->num--;

                        skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
                        skb_shinfo(nskb)->nr_frags = 0;
                        kfree_skb(nskb);

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;

                        skb_shinfo(skb)->frags[i].size = len;
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += len;
                        skb->len += len;
                }
        } else {
                len -= sizeof(struct virtio_net_hdr);

                if (len <= MAX_PACKET_LEN)
                        trim_pages(vi, skb);

                err = pskb_trim(skb, len);
                if (err) {
                        pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
                                 len, err);
                        dev->stats.rx_dropped++;
                        goto drop;
                }
        }

        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->gso_type);
                        goto frame_err;
                }

                if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}
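/*
 * Rough shape of the receive buffers posted below (a sketch, not a spec):
 *
 * Without mergeable buffers (try_fill_recv_maxbufs):
 *	sg[0]   -> struct virtio_net_hdr kept in skb->cb
 *	sg[1..] -> the skb's linear data, plus up to MAX_SKB_FRAGS whole
 *		   pages when vi->big_packets is set
 *
 * With mergeable buffers (try_fill_recv):
 *	sg[0]   -> one whole page; the host writes the
 *		   virtio_net_hdr_mrg_rxbuf and the packet data into it
 *
 * Either way add_buf() is given 0 "out" and N "in" entries, since every
 * byte is device-written, and the skb doubles as the completion token
 * that get_buf() later hands back.
 */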
static void try_fill_recv_maxbufs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err, i;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        for (;;) {
                struct virtio_net_hdr *hdr;

                skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
                if (unlikely(!skb))
                        break;

                skb_put(skb, MAX_PACKET_LEN);

                hdr = skb_vnet_hdr(skb);
                sg_init_one(sg, hdr, sizeof(*hdr));

                if (vi->big_packets) {
                        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                                f->page = get_a_page(vi, GFP_ATOMIC);
                                if (!f->page)
                                        break;

                                f->page_offset = 0;
                                f->size = PAGE_SIZE;

                                skb->data_len += PAGE_SIZE;
                                skb->len += PAGE_SIZE;

                                skb_shinfo(skb)->nr_frags++;
                        }
                }

                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err) {
                        skb_unlink(skb, &vi->recv);
                        trim_pages(vi, skb);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        }
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
}

static void try_fill_recv(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        struct scatterlist sg[1];
        int err;

        if (!vi->mergeable_rx_bufs) {
                try_fill_recv_maxbufs(vi);
                return;
        }

        for (;;) {
                skb_frag_t *f;

                skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
                if (unlikely(!skb))
                        break;

                skb_reserve(skb, NET_IP_ALIGN);

                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, GFP_ATOMIC);
                if (!f->page) {
                        kfree_skb(skb);
                        break;
                }

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                skb_shinfo(skb)->nr_frags++;

                sg_init_one(sg, page_address(f->page), PAGE_SIZE);
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
                if (err) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        }
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI, suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        /* FIXME: If we oom and completely run out of inbufs, we need
         * to start a timer trying to fill more. */
        if (vi->num < vi->max / 2)
                try_fill_recv(vi);

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}
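/*
 * Note on the poll loop above: napi_complete() runs with virtqueue
 * callbacks still disabled, so enable_cb() is what closes the race.
 * If it returns false, buffers were added between the last get_buf()
 * and re-enabling callbacks; in that case we disable callbacks again,
 * reclaim NAPI_STATE_SCHED and jump back to the harvest loop rather
 * than wait for an interrupt that may never come.
 */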
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                kfree_skb(skb);
        }
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
        struct virtnet_info *vi = (void *)data;

        netif_tx_lock(vi->dev);

        free_old_xmit_skbs(vi);

        if (!skb_queue_empty(&vi->send))
                mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

        netif_tx_unlock(vi->dev);
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        int num, err;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
        struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->csum_start = skb->csum_start - skb_headroom(skb);
                hdr->csum_offset = skb->csum_offset;
        } else {
                hdr->flags = 0;
                hdr->csum_offset = hdr->csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr_len = skb_transport_header(skb) - skb->data;
                hdr->gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->gso_size = hdr->hdr_len = 0;
        }

        mhdr->num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_init_one(sg, mhdr, sizeof(*mhdr));
        else
                sg_init_one(sg, hdr, sizeof(*hdr));

        num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

        err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
        if (!err && !vi->free_in_tasklet)
                mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

        return err;
}

static void xmit_tasklet(unsigned long data)
{
        struct virtnet_info *vi = (void *)data;

        netif_tx_lock_bh(vi->dev);
        if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
                vi->svq->vq_ops->kick(vi->svq);
                vi->last_xmit_skb = NULL;
        }
        if (vi->free_in_tasklet)
                free_old_xmit_skbs(vi);
        netif_tx_unlock_bh(vi->dev);
}

static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* If we have a buffer left over from last time, send it now. */
        if (unlikely(vi->last_xmit_skb) &&
            xmit_skb(vi, vi->last_xmit_skb) != 0)
                goto stop_queue;

        vi->last_xmit_skb = NULL;

        /* Put new one in send queue and do transmit */
        if (likely(skb)) {
                __skb_queue_head(&vi->send, skb);
                if (xmit_skb(vi, skb) != 0) {
                        vi->last_xmit_skb = skb;
                        skb = NULL;
                        goto stop_queue;
                }
        }
done:
        vi->svq->vq_ops->kick(vi->svq);
        return NETDEV_TX_OK;

stop_queue:
        pr_debug("%s: virtio not prepared to send\n", dev->name);
        netif_stop_queue(dev);

        /* Activate callback for using skbs: if this returns false it
         * means some were used in the meantime. */
        if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                vi->svq->vq_ops->disable_cb(vi->svq);
                netif_start_queue(dev);
                goto again;
        }
        if (skb) {
                /* Drop this skb: we only queue one. */
                vi->dev->stats.tx_dropped++;
                kfree_skb(skb);
        }
        goto done;
}
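/*
 * Transmit path summary (descriptive only): xmit_skb() fills the
 * virtio_net_hdr in skb->cb, builds sg[0] = header, sg[1..] = payload and
 * posts them with add_buf(svq, sg, num, 0, skb), i.e. all "out" entries.
 * If the ring is full, the single overflow skb is parked in
 * vi->last_xmit_skb and retried from xmit_tasklet() or the next
 * start_xmit().  Completed skbs are reclaimed by free_old_xmit_skbs(),
 * driven at the top of start_xmit(), from the tasklet when the transport
 * sets VIRTIO_F_NOTIFY_ON_EMPTY, or otherwise by the xmit_free timer
 * every HZ/10 jiffies.
 */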
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

static struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_change_mtu      = virtnet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}
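/*
 * The config-space reads above (status) and in virtnet_probe() below (mac)
 * both use offsetof into struct virtio_net_config.  The layout is
 * paraphrased here for reference from <linux/virtio_net.h>, which is
 * authoritative:
 *
 *	struct virtio_net_config {
 *		__u8  mac[6];	// valid only if VIRTIO_NET_F_MAC was negotiated
 *		__u16 status;	// valid only if VIRTIO_NET_F_STATUS was negotiated
 *	} __attribute__((packed));
 */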
static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;

        /* If they give us a callback when all buffers are done, we don't need
         * the timer. */
        vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send. */
        vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
        if (IS_ERR(vi->rvq)) {
                err = PTR_ERR(vi->rvq);
                goto free;
        }

        vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
        if (IS_ERR(vi->svq)) {
                err = PTR_ERR(vi->svq);
                goto free_recv;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

        if (!vi->free_in_tasklet)
                setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_send;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
free_send:
        vdev->config->del_vq(vi->svq);
free_recv:
        vdev->config->del_vq(vi->rvq);
free:
        free_netdev(dev);
        return err;
}
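/*
 * virtnet_remove() below is essentially the reverse of virtnet_probe():
 * the device is reset first so no virtqueue callbacks can run, the
 * xmit_free timer (if used) is stopped, queued skbs are freed, and only
 * then are the virtqueues deleted, the netdev unregistered and the page
 * pool returned with __free_pages().
 */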
static void virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        if (!vi->free_in_tasklet)
                del_timer_sync(&vi->xmit_free_timer);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        vdev->config->del_vq(vi->svq);
        vdev->config->del_vq(vi->rvq);
        unregister_netdev(vi->dev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS,
        VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtnet_probe,
        .remove = __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");