/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* Per-cpu variable to show the mapping from CPU to virtqueue */
	int __percpu *vq_index;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and data sg buffer shares same page with this header sg.
	 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
	 */
	char padding[6];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (virtqueue_get_queue_index(vq) - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return virtqueue_get_queue_index(vq) / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(rq->vq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--rq->num;
	}
	return 0;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(rq, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(rq, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(rq, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(rq, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(rq, page);

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		/* Make sure we have some buffers: if oom use wq. */
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);

	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
				 0, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(sq->vq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
				  0, 0))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else
		vi->curr_queue_pairs = queue_pairs;

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");
"en" : "dis"); 951 952 uc_count = netdev_uc_count(dev); 953 mc_count = netdev_mc_count(dev); 954 /* MAC filter - use one buffer for both lists */ 955 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 956 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 957 mac_data = buf; 958 if (!buf) { 959 dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 960 return; 961 } 962 963 sg_init_table(sg, 2); 964 965 /* Store the unicast list and count in the front of the buffer */ 966 mac_data->entries = uc_count; 967 i = 0; 968 netdev_for_each_uc_addr(ha, dev) 969 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 970 971 sg_set_buf(&sg[0], mac_data, 972 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 973 974 /* multicast list and count fill the end */ 975 mac_data = (void *)&mac_data->macs[uc_count][0]; 976 977 mac_data->entries = mc_count; 978 i = 0; 979 netdev_for_each_mc_addr(ha, dev) 980 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 981 982 sg_set_buf(&sg[1], mac_data, 983 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 984 985 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 986 VIRTIO_NET_CTRL_MAC_TABLE_SET, 987 sg, 2, 0)) 988 dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); 989 990 kfree(buf); 991 } 992 993 static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) 994 { 995 struct virtnet_info *vi = netdev_priv(dev); 996 struct scatterlist sg; 997 998 sg_init_one(&sg, &vid, sizeof(vid)); 999 1000 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1001 VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) 1002 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 1003 return 0; 1004 } 1005 1006 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) 1007 { 1008 struct virtnet_info *vi = netdev_priv(dev); 1009 struct scatterlist sg; 1010 1011 sg_init_one(&sg, &vid, sizeof(vid)); 1012 1013 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1014 VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) 1015 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 1016 return 0; 1017 } 1018 1019 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) 1020 { 1021 int i; 1022 int cpu; 1023 1024 if (vi->affinity_hint_set) { 1025 for (i = 0; i < vi->max_queue_pairs; i++) { 1026 virtqueue_set_affinity(vi->rq[i].vq, -1); 1027 virtqueue_set_affinity(vi->sq[i].vq, -1); 1028 } 1029 1030 vi->affinity_hint_set = false; 1031 } 1032 1033 i = 0; 1034 for_each_online_cpu(cpu) { 1035 if (cpu == hcpu) { 1036 *per_cpu_ptr(vi->vq_index, cpu) = -1; 1037 } else { 1038 *per_cpu_ptr(vi->vq_index, cpu) = 1039 ++i % vi->curr_queue_pairs; 1040 } 1041 } 1042 } 1043 1044 static void virtnet_set_affinity(struct virtnet_info *vi) 1045 { 1046 int i; 1047 int cpu; 1048 1049 /* In multiqueue mode, when the number of cpu is equal to the number of 1050 * queue pairs, we let the queue pairs to be private to one cpu by 1051 * setting the affinity hint to eliminate the contention. 
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		*per_cpu_ptr(vi->vq_index, cpu) = i;
		i++;
	}

	vi->affinity_hint_set = true;
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}


static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* To avoid contending a lock held by a vcpu that would exit to the host,
 * select the txq based on the processor id.
 */
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	int txq;
	struct virtnet_info *vi = netdev_priv(dev);

	if (skb_rx_queue_recorded(skb)) {
		txq = skb_get_rx_queue(skb);
	} else {
		txq = *__this_cpu_ptr(vi->vq_index);
		if (txq == -1)
			txq = 0;
	}

	while (unlikely(txq >= dev->real_num_tx_queues))
		txq -= dev->real_num_tx_queues;

	return txq;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open = virtnet_open,
	.ndo_stop = virtnet_close,
	.ndo_start_xmit = start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode = virtnet_set_rx_mode,
	.ndo_change_mtu = virtnet_change_mtu,
	.ndo_get_stats64 = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
	.ndo_select_queue = virtnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
	}
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			dev_kfree_skb(buf);
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs || vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else
				dev_kfree_skb(buf);
			--vi->rq[i].num;
		}
		BUG_ON(vi->rq[i].num != 0);
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
				offsetof(struct virtio_net_config,
					 max_virtqueue_pairs), &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len) < 0)
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	vi->vq_index = alloc_percpu(int);
	if (vi->vq_index == NULL)
		goto free_stats;

	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_index;

	netif_set_real_num_tx_queues(dev, 1);
	netif_set_real_num_rx_queues(dev, 1);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].num == 0) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	virtnet_del_vqs(vi);
free_index:
	free_percpu(vi->vq_index);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	free_percpu(vi->vq_index);
	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);

	netif_device_attach(vi->dev);

	for (i = 0; i < vi->max_queue_pairs; i++)
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");