/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + ETH_DATA_LEN)

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;
	struct net_device *dev;
	struct napi_struct napi;

	/* The skb we couldn't send because buffers were full. */
	struct sk_buff *last_xmit_skb;

	/* If we need to free in a timer, this is it. */
	struct timer_list xmit_free_timer;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* For cleaning up after transmission. */
	struct tasklet_struct tasklet;
	bool free_in_tasklet;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr *)skb->cb;
}

static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
{
	sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
}

static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);

	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
	 * queued, start_xmit won't be called. */
	tasklet_schedule(&vi->tasklet);
}
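/*
 * Buffer layout used by both queues below: the virtio_net_hdr kept in
 * skb->cb always goes first in the scatterlist, followed by the skb data.
 * A minimal sketch of the pattern (vq stands for either vi->rvq or vi->svq;
 * "out" is the count of host-readable entries, "in" of host-writable ones,
 * error handling omitted):
 *
 *	struct scatterlist sg[2 + MAX_SKB_FRAGS];
 *	sg_init_table(sg, 2 + MAX_SKB_FRAGS);
 *	vnet_hdr_to_sg(sg, skb);			// sg[0]: header
 *	num = skb_to_sgvec(skb, sg + 1, 0, skb->len);	// sg[1..]: data
 *	vq->vq_ops->add_buf(vq, sg, out, in, skb);	// skb is the token
 *	vq->vq_ops->kick(vq);
 */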
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	int err;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}
	len -= sizeof(struct virtio_net_hdr);

	if (len <= MAX_PACKET_LEN) {
		unsigned int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page);
		skb->data_len = 0;
		skb_shinfo(skb)->nr_frags = 0;
	}

	err = pskb_trim(skb, len);
	if (err) {
		pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
		dev->stats.rx_dropped++;
		goto drop;
	}
	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}
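/*
 * Receive buffer sizing (illustrative summary, not a spec): with big_packets
 * set, each receive skb posted by try_fill_recv() below can hold roughly
 *
 *	MAX_PACKET_LEN + MAX_SKB_FRAGS * PAGE_SIZE
 *
 * bytes, so a full-sized GSO frame from the host fits in a single buffer.
 * When the packet turns out to be small, receive_skb() above hands the
 * unused pages back to the vi->pages free list via give_a_page().
 */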
static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[2 + MAX_SKB_FRAGS];
	int num, err, i;

	sg_init_table(sg, 2 + MAX_SKB_FRAGS);
	for (;;) {
		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);
		vnet_hdr_to_sg(sg, skb);

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, GFP_ATOMIC);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg + 1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__netif_rx_schedule(vi->dev, &vi->napi);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__netif_rx_schedule(vi->dev, napi);
			goto again;
		}
	}

	return received;
}

static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}
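/*
 * Callback re-enable pattern (a sketch of the convention relied on by
 * virtnet_poll() above and start_xmit() below, not an extra API):
 * enable_cb() returns false if the host queued more work while callbacks
 * were disabled, so the caller must disable again and re-run its loop
 * instead of going idle:
 *
 *	if (unlikely(!vq->vq_ops->enable_cb(vq))) {
 *		vq->vq_ops->disable_cb(vq);
 *		goto again;		// more buffers already pending
 *	}
 */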
/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num, err;
	struct scatterlist sg[2 + MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2 + MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb,
		 dest[0], dest[1], dest[2],
		 dest[3], dest[4], dest[5]);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg + 1, 0, skb->len) + 1;

	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (!err && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	return err;
}

static void xmit_tasklet(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock_bh(vi->dev);
	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
		vi->svq->vq_ops->kick(vi->svq);
		vi->last_xmit_skb = NULL;
	}
	if (vi->free_in_tasklet)
		free_old_xmit_skbs(vi);
	netif_tx_unlock_bh(vi->dev);
}

static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* If we have a buffer left over from last time, send it now. */
	if (unlikely(vi->last_xmit_skb) &&
	    xmit_skb(vi, vi->last_xmit_skb) != 0)
		goto stop_queue;

	vi->last_xmit_skb = NULL;

	/* Put new one in send queue and do transmit */
	if (likely(skb)) {
		__skb_queue_head(&vi->send, skb);
		if (xmit_skb(vi, skb) != 0) {
			vi->last_xmit_skb = skb;
			skb = NULL;
			goto stop_queue;
		}
	}
done:
	vi->svq->vq_ops->kick(vi->svq);
	return NETDEV_TX_OK;

stop_queue:
	pr_debug("%s: virtio not prepared to send\n", dev->name);
	netif_stop_queue(dev);

	/* Activate callback for using skbs: if this returns false it
	 * means some were used in the meantime. */
	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
		vi->svq->vq_ops->disable_cb(vi->svq);
		netif_start_queue(dev);
		goto again;
	}
	if (skb) {
		/* Drop this skb: we only queue one. */
		vi->dev->stats.tx_dropped++;
		kfree_skb(skb);
	}
	goto done;
}
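/*
 * Worked example of the offload header built by xmit_skb() above (purely
 * illustrative values): for a CHECKSUM_PARTIAL TCP/IPv4 frame with no IP
 * options, the stack asks the host to finish the checksum with
 *
 *	hdr->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 *	hdr->csum_start  = 14 + 20;	// Ethernet + IPv4 headers
 *	hdr->csum_offset = 16;		// checksum field inside the TCP header
 *
 * and, if the frame is also GSO, gso_type/gso_size tell the host how to
 * resegment it.
 */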
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we napi_enabled,
	 * we won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable it
	 * here.  We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (netif_rx_schedule_prep(dev, &vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__netif_rx_schedule(dev, &vi->napi);
	}
	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

static struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
};

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->open = virtnet_open;
	dev->stop = virtnet_close;
	dev->hard_start_xmit = start_xmit;
	dev->features = NETIF_F_HIGHDMA;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = virtnet_netpoll;
#endif
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}
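	/* Rough mapping assumed above (summary only, not an exhaustive table):
	 * VIRTIO_NET_F_CSUM      -> NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST
	 * VIRTIO_NET_F_HOST_TSO4 -> NETIF_F_TSO
	 * VIRTIO_NET_F_HOST_TSO6 -> NETIF_F_TSO6
	 * VIRTIO_NET_F_HOST_ECN  -> NETIF_F_TSO_ECN
	 * VIRTIO_NET_F_HOST_UFO  -> NETIF_F_UFO
	 * VIRTIO_NET_F_GSO       -> all of the above TSO/UFO bits at once. */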
	/* Configuration may specify what MAC to use.  Otherwise we use a
	 * random one. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;

	/* If they give us a callback when all buffers are done, we don't need
	 * the timer. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	/* We expect two virtqueues, receive then send. */
	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
	if (IS_ERR(vi->rvq)) {
		err = PTR_ERR(vi->rvq);
		goto free;
	}

	vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
	if (IS_ERR(vi->svq)) {
		err = PTR_ERR(vi->svq);
		goto free_recv;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_send;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
free_send:
	vdev->config->del_vq(vi->svq);
free_recv:
	vdev->config->del_vq(vi->rvq);
free:
	free_netdev(dev);
	return err;
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	vdev->config->del_vq(vi->svq);
	vdev->config->del_vq(vi->rvq);
	unregister_netdev(vi->dev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
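/*
 * Feature negotiation (sketch of the flow assumed by virtnet_probe()): the
 * table below advertises every feature bit this driver understands; the
 * virtio core intersects it with what the host offers before probe runs,
 * so a check such as
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
 *		dev->features |= NETIF_F_TSO;
 *
 * only ever sees bits both sides agreed on.
 */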
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
	VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");
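/*
 * The module parameters declared at the top of this file can be overridden
 * at load time; an illustrative invocation (assuming the module is built as
 * virtio_net.ko):
 *
 *	modprobe virtio_net csum=0 gso=0 napi_weight=64
 *
 * which disables checksum/GSO offload negotiation and halves the default
 * NAPI poll weight of 128.
 */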