/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <net/busy_poll.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 1, 64)

/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation.
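	 * Mergeable receive buffers are carved out of these pages; each
	 * posted buffer holds its own reference on the page (see
	 * add_recvbuf_mergeable()).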
	 */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* Control VQ buffers: protected by the rtnl lock */
	struct virtio_net_ctrl_hdr ctrl_hdr;
	virtio_net_ctrl_ack ctrl_status;
	struct virtio_net_ctrl_mq ctrl_mq;
	u8 ctrl_promisc;
	u8 ctrl_allmulti;
	u16 ctrl_vid;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages.
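	 * The returned chain ends up at the head of the list, so the most
	 * recently used pages are handed back out first by get_a_page().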
	 */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
{
	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
}

static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
{
	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
}

static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
{
	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
	return (unsigned long)buf | (size - 1);
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof *hdr;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
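	 * (e.g. one that reports more data than fits in MAX_SKB_FRAGS pages).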
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static void virtnet_xdp_xmit(struct virtnet_info *vi,
			     struct receive_queue *rq,
			     struct send_queue *sq,
			     struct xdp_buff *xdp,
			     void *data)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int num_sg, len;
	void *xdp_sent;
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (vi->mergeable_rx_bufs) {
			struct page *sent_page = virt_to_head_page(xdp_sent);

			put_page(sent_page);
		} else { /* small buffer */
			struct sk_buff *skb = xdp_sent;

			kfree_skb(skb);
		}
	}

	if (vi->mergeable_rx_bufs) {
		/* Zero header and leave csum up to XDP layers */
		hdr = xdp->data;
		memset(hdr, 0, vi->hdr_len);

		num_sg = 1;
		sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
	} else { /* small buffer */
		struct sk_buff *skb = data;

		/* Zero header and leave csum up to XDP layers */
		hdr = skb_vnet_hdr(skb);
		memset(hdr, 0, vi->hdr_len);

		num_sg = 2;
		sg_init_table(sq->sg, 2);
		sg_set_buf(sq->sg, hdr, vi->hdr_len);
		skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
	}
	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
				   data, GFP_ATOMIC);
	if (unlikely(err)) {
		if (vi->mergeable_rx_bufs) {
			struct page *page = virt_to_head_page(xdp->data);

			put_page(page);
		} else /* small buffer */
			kfree_skb(data);
		/* On error abort to avoid unnecessary kick */
		return;
	}

	virtqueue_kick(sq->vq);
}

static u32 do_xdp_prog(struct virtnet_info *vi,
		       struct receive_queue *rq,
		       struct bpf_prog *xdp_prog,
		       void *data, int len)
{
	int hdr_padded_len;
	struct xdp_buff xdp;
	void *buf;
	unsigned int qp;
	u32 act;

	if (vi->mergeable_rx_bufs) {
		hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		xdp.data = data + hdr_padded_len;
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		buf = data;
	} else { /* small buffers */
		struct sk_buff *skb = data;

		xdp.data = skb->data;
		xdp.data_end = xdp.data + len;
		buf = skb->data;
	}

	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	switch (act) {
	case XDP_PASS:
		return XDP_PASS;
	case XDP_TX:
		qp = vi->curr_queue_pairs -
			vi->xdp_queue_pairs +
			smp_processor_id();
		xdp.data = buf;
		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
		return XDP_TX;
	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:
	case XDP_DROP:
		return XDP_DROP;
	}
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, unsigned int len)
{
	struct sk_buff *skb = buf;
	struct bpf_prog *xdp_prog;

	len -= vi->hdr_len;
	skb_trim(skb, len);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
		u32 act;

		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
			goto err_xdp;
		act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_TX:
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_DROP:
		default:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	kfree_skb(skb);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

/* The conditions to enable XDP should preclude the underlying device from
 * sending packets across multiple buffers (num_buf > 1). However per spec
 * it does not appear to be illegal to do so but rather just against convention.
 * So in order to avoid making a system unresponsive the packets are pushed
 * into a page and the XDP program is run. This will be extremely slow and we
 * push a warning to the user to fix this as soon as possible. Fixing this may
 * require resolving the underlying hardware to determine why multiple buffers
 * are being received or simply loading the XDP program in the ingress stack
 * after the skb is built because there is no advantage to running it here
 * anymore.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	unsigned int page_off = 0;

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		unsigned long ctx;
		void *buf;
		int off;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!ctx))
			goto err_buf;

		buf = mergeable_ctx_to_buf_address(ctx);
		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
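		 * If the linearized data would exceed the single destination
		 * page, drop the frame instead of overflowing it.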
		 */
		if ((page_off + buflen) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	*len = page_off;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 unsigned long ctx,
					 unsigned int len)
{
	void *buf = mergeable_ctx_to_buf_address(ctx);
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct page *xdp_page;
		u32 act;

		/* This happens when rx buffer size is underestimated */
		if (unlikely(num_buf > 1)) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset, &len);
			if (!xdp_page)
				goto err_xdp;
			offset = 0;
		} else {
			xdp_page = page;
		}

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded. In practice I
		 * was not able to create this condition.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		act = do_xdp_prog(vi, rq, xdp_prog,
				  page_address(xdp_page) + offset, len);
		switch (act) {
		case XDP_PASS:
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       0, len, PAGE_SIZE);
				ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
				return head_skb;
			}
			break;
		case XDP_TX:
			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_DROP:
		default:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		buf = mergeable_ctx_to_buf_address(ctx);
		page = virt_to_head_page(buf);

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
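			/* frags added to tail skbs are accounted on the head skb */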
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (--num_buf) {
		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len)
{
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			unsigned long ctx = (unsigned long)buf;
			void *base = mergeable_ctx_to_buf_address(ctx);
			put_page(virt_to_head_page(base));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			dev_kfree_skb(buf);
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(dev, vi, rq, buf, len);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_init_table(rq->sg, 2);
	sg_set_buf(rq->sg, hdr, vi->hdr_len);
	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;
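	/* Buffer layout: sg[0] = virtio header, sg[1] = rest of the first
	 * page, sg[2..MAX_SKB_FRAGS + 1] = one full page each, chained
	 * through page->private.
	 */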

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned long ctx;
	int err;
	unsigned int len, hole;

	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	ctx = mergeable_buf_to_ctx(buf, len);
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer. This extra space is not included in
		 * the truesize stored in ctx.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
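 *
 * Those process-context callers pass GFP_KERNEL; the receive path refills
 * with GFP_ATOMIC.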
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	gfp |= __GFP_COLD;
	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now. virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED.
	 */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int len, received = 0;
	void *buf;

	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(vi, rq, buf, len);
		received++;
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	return received;
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	unsigned int r, received;

	received = virtnet_receive(rq, budget);

	/* Out of packets?
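	 * If so, re-enable callbacks and complete NAPI; if more buffers
	 * arrived in that window, reschedule ourselves.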
	 */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete_done(napi, received);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
		}
	}

	return received;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int virtnet_busy_poll(struct napi_struct *napi)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int r, received = 0, budget = 4;

	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
		return LL_FLUSH_FAILED;

	if (!napi_schedule_prep(napi))
		return LL_FLUSH_BUSY;

	virtqueue_disable_cb(rq->vq);

again:
	received += virtnet_receive(rq, budget);

	r = virtqueue_enable_cb_prepare(rq->vq);
	clear_bit(NAPI_STATE_SCHED, &napi->state);
	if (unlikely(virtqueue_poll(rq->vq, r)) &&
	    napi_schedule_prep(napi)) {
		virtqueue_disable_cb(rq->vq);
		if (received < budget) {
			budget -= received;
			goto again;
		} else {
			__napi_schedule(napi);
		}
	}

	return received;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev)))
		BUG();

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations.
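		 * (free_old_xmit_skbs() adds skb->len to the tx byte counter,
		 * and that length must not include the virtio header.)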
		 */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !skb->xmit_more;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq))
		virtqueue_kick(sq->vq);

	return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	vi->ctrl_status = ~0;
	vi->ctrl_hdr.class = class;
	vi->ctrl_hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status.
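	 * The device writes VIRTIO_NET_OK or VIRTIO_NET_ERR into ctrl_status
	 * once the command has been processed.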
	 */
	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl_status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl_status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when the device is going up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl_promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
"en" : "dis"); 1404 1405 uc_count = netdev_uc_count(dev); 1406 mc_count = netdev_mc_count(dev); 1407 /* MAC filter - use one buffer for both lists */ 1408 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 1409 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 1410 mac_data = buf; 1411 if (!buf) 1412 return; 1413 1414 sg_init_table(sg, 2); 1415 1416 /* Store the unicast list and count in the front of the buffer */ 1417 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 1418 i = 0; 1419 netdev_for_each_uc_addr(ha, dev) 1420 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1421 1422 sg_set_buf(&sg[0], mac_data, 1423 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 1424 1425 /* multicast list and count fill the end */ 1426 mac_data = (void *)&mac_data->macs[uc_count][0]; 1427 1428 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 1429 i = 0; 1430 netdev_for_each_mc_addr(ha, dev) 1431 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1432 1433 sg_set_buf(&sg[1], mac_data, 1434 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 1435 1436 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1437 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 1438 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 1439 1440 kfree(buf); 1441 } 1442 1443 static int virtnet_vlan_rx_add_vid(struct net_device *dev, 1444 __be16 proto, u16 vid) 1445 { 1446 struct virtnet_info *vi = netdev_priv(dev); 1447 struct scatterlist sg; 1448 1449 vi->ctrl_vid = vid; 1450 sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1451 1452 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1453 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 1454 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 1455 return 0; 1456 } 1457 1458 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 1459 __be16 proto, u16 vid) 1460 { 1461 struct virtnet_info *vi = netdev_priv(dev); 1462 struct scatterlist sg; 1463 1464 vi->ctrl_vid = vid; 1465 sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1466 1467 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1468 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 1469 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 1470 return 0; 1471 } 1472 1473 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) 1474 { 1475 int i; 1476 1477 if (vi->affinity_hint_set) { 1478 for (i = 0; i < vi->max_queue_pairs; i++) { 1479 virtqueue_set_affinity(vi->rq[i].vq, -1); 1480 virtqueue_set_affinity(vi->sq[i].vq, -1); 1481 } 1482 1483 vi->affinity_hint_set = false; 1484 } 1485 } 1486 1487 static void virtnet_set_affinity(struct virtnet_info *vi) 1488 { 1489 int i; 1490 int cpu; 1491 1492 /* In multiqueue mode, when the number of cpu is equal to the number of 1493 * queue pairs, we let the queue pairs to be private to one cpu by 1494 * setting the affinity hint to eliminate the contention. 
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
		i++;
	}

	vi->affinity_hint_set = true;
}

static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

	virtnet_clean_affinity(vi, cpu);
	return 0;
}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}


static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
		return -EINVAL;

	/* For now we don't support modifying channels while XDP is loaded;
	 * also, when XDP is loaded all RX queues have XDP programs, so we
	 * only need to check a single RX queue.
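	 * (virtnet_xdp_set() installs the program on every receive queue.)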
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

/* Check if the user is trying to change anything besides speed/duplex */
static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	/* cmd is always set so we need to clear it, validate the port type
	 * and also without autonegotiation we can ignore advertising
	 */
	ethtool_cmd_speed_set(&diff1, 0);
	diff2.port = PORT_OTHER;
	diff1.advertising = 0;
	diff1.duplex = 0;
	diff1.cmd = 0;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	/* don't allow custom speed and duplex */
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !virtnet_validate_ethtool_cmd(cmd))
		return -EINVAL;
	vi->speed = speed;
	vi->duplex = cmd->duplex;

	return 0;
}

static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, vi->speed);
	cmd->duplex = vi->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_settings = virtnet_get_settings,
	.set_settings = virtnet_set_settings,
};

static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
	struct virtnet_info *vi = netdev_priv(dev);
	struct bpf_prog *old_prog;
	u16 xdp_qp = 0, curr_qp;
	int i, err;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
		netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
		return -EOPNOTSUPP;
	}

	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
		netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
required\n"); 1719 return -EINVAL; 1720 } 1721 1722 if (dev->mtu > max_sz) { 1723 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); 1724 return -EINVAL; 1725 } 1726 1727 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 1728 if (prog) 1729 xdp_qp = nr_cpu_ids; 1730 1731 /* XDP requires extra queues for XDP_TX */ 1732 if (curr_qp + xdp_qp > vi->max_queue_pairs) { 1733 netdev_warn(dev, "request %i queues but max is %i\n", 1734 curr_qp + xdp_qp, vi->max_queue_pairs); 1735 return -ENOMEM; 1736 } 1737 1738 err = virtnet_set_queues(vi, curr_qp + xdp_qp); 1739 if (err) { 1740 dev_warn(&dev->dev, "XDP Device queue allocation failure.\n"); 1741 return err; 1742 } 1743 1744 if (prog) { 1745 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 1746 if (IS_ERR(prog)) { 1747 virtnet_set_queues(vi, curr_qp); 1748 return PTR_ERR(prog); 1749 } 1750 } 1751 1752 vi->xdp_queue_pairs = xdp_qp; 1753 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 1754 1755 for (i = 0; i < vi->max_queue_pairs; i++) { 1756 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 1757 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 1758 if (old_prog) 1759 bpf_prog_put(old_prog); 1760 } 1761 1762 return 0; 1763 } 1764 1765 static bool virtnet_xdp_query(struct net_device *dev) 1766 { 1767 struct virtnet_info *vi = netdev_priv(dev); 1768 int i; 1769 1770 for (i = 0; i < vi->max_queue_pairs; i++) { 1771 if (vi->rq[i].xdp_prog) 1772 return true; 1773 } 1774 return false; 1775 } 1776 1777 static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) 1778 { 1779 switch (xdp->command) { 1780 case XDP_SETUP_PROG: 1781 return virtnet_xdp_set(dev, xdp->prog); 1782 case XDP_QUERY_PROG: 1783 xdp->prog_attached = virtnet_xdp_query(dev); 1784 return 0; 1785 default: 1786 return -EINVAL; 1787 } 1788 } 1789 1790 static const struct net_device_ops virtnet_netdev = { 1791 .ndo_open = virtnet_open, 1792 .ndo_stop = virtnet_close, 1793 .ndo_start_xmit = start_xmit, 1794 .ndo_validate_addr = eth_validate_addr, 1795 .ndo_set_mac_address = virtnet_set_mac_address, 1796 .ndo_set_rx_mode = virtnet_set_rx_mode, 1797 .ndo_get_stats64 = virtnet_stats, 1798 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 1799 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 1800 #ifdef CONFIG_NET_POLL_CONTROLLER 1801 .ndo_poll_controller = virtnet_netpoll, 1802 #endif 1803 #ifdef CONFIG_NET_RX_BUSY_POLL 1804 .ndo_busy_poll = virtnet_busy_poll, 1805 #endif 1806 .ndo_xdp = virtnet_xdp, 1807 }; 1808 1809 static void virtnet_config_changed_work(struct work_struct *work) 1810 { 1811 struct virtnet_info *vi = 1812 container_of(work, struct virtnet_info, config_work); 1813 u16 v; 1814 1815 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 1816 struct virtio_net_config, status, &v) < 0) 1817 return; 1818 1819 if (v & VIRTIO_NET_S_ANNOUNCE) { 1820 netdev_notify_peers(vi->dev); 1821 virtnet_ack_link_announce(vi); 1822 } 1823 1824 /* Ignore unknown (future) status bits */ 1825 v &= VIRTIO_NET_S_LINK_UP; 1826 1827 if (vi->status == v) 1828 return; 1829 1830 vi->status = v; 1831 1832 if (vi->status & VIRTIO_NET_S_LINK_UP) { 1833 netif_carrier_on(vi->dev); 1834 netif_tx_wake_all_queues(vi->dev); 1835 } else { 1836 netif_carrier_off(vi->dev); 1837 netif_tx_stop_all_queues(vi->dev); 1838 } 1839 } 1840 1841 static void virtnet_config_changed(struct virtio_device *vdev) 1842 { 1843 struct virtnet_info *vi = vdev->priv; 1844 1845 schedule_work(&vi->config_work); 1846 } 1847 1848 static void virtnet_free_queues(struct virtnet_info *vi) 1849 { 1850 int i; 1851 1852 for 
	for (i = 0; i < vi->max_queue_pairs; i++) {
		napi_hash_del(&vi->rq[i].napi);
		netif_napi_del(&vi->rq[i].napi);
	}

	/* We called napi_hash_del() before netif_napi_del(),
	 * we need to respect an RCU grace period before freeing vi->rq
	 */
	synchronize_net();

	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	struct bpf_prog *old_prog;
	int i;

	rtnl_lock();
	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);

		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
		if (old_prog)
			bpf_prog_put(old_prog);
	}
	rtnl_unlock();
}

static void free_receive_page_frags(struct virtnet_info *vi)
{
	int i;
	for (i = 0; i < vi->max_queue_pairs; i++)
		if (vi->rq[i].alloc_frag.page)
			put_page(vi->rq[i].alloc_frag.page);
}

static bool is_xdp_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (!is_xdp_queue(vi, i))
				dev_kfree_skb(buf);
			else
				put_page(virt_to_head_page(buf));
		}
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs) {
				unsigned long ctx = (unsigned long)buf;
				void *base = mergeable_ctx_to_buf_address(ctx);
				put_page(virt_to_head_page(base));
			} else if (vi->big_packets) {
				give_pages(&vi->rq[i], buf);
			} else {
				dev_kfree_skb(buf);
			}
		}
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
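	 * For example, with 2 queue pairs plus a control vq the layout is
	 * rx0, tx0, rx1, tx1, ctrl (5 virtqueues in total).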
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
		struct rx_queue_attribute *attribute, char *buf)
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
	struct ewma_pkt_len *avg;

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif

static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}

#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	/* Find out whether the host supports a multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
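
		/* Note: bits added to dev->hw_features are only advertised as
		 * offloads that may be toggled later (e.g. via ethtool -K);
		 * they are copied into dev->features, and hence enabled by
		 * default, only when the csum/gso module parameters allow it.
		 */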
		/* Individual feature bits: what can the host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	for_each_possible_cpu(i) {
		struct virtnet_stats *virtnet_stats;
		virtnet_stats = per_cpu_ptr(vi->stats, i);
		u64_stats_init(&virtnet_stats->tx_syncp);
		u64_stats_init(&virtnet_stats->rx_syncp);
	}

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
		} else {
			dev->mtu = mtu;
			dev->max_mtu = mtu;
		}
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
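	/* Only curr_queue_pairs (bounded above by the number of online CPUs)
	 * are exposed to the stack as real TX/RX queues, even though
	 * max_queue_pairs virtqueue pairs have been set up.
	 */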
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	virtio_device_ready(vdev);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	vi->vdev->config->reset(vdev);

	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}
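
/* Common teardown used by both virtnet_remove() and virtnet_freeze(): reset
 * the device first so the host stops using the rings before any outstanding
 * buffers are detached and freed.
 */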
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++)
			napi_disable(&vi->rq[i].napi);
	}

	remove_vq_common(vi);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);
	}

	netif_device_attach(vi->dev);

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err)
		return err;

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};
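
/* features_legacy is only consulted for transitional (pre-VIRTIO 1.0)
 * devices; the extra VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT bits are not
 * offered to modern devices, which is why the probe path above treats
 * VIRTIO_F_VERSION_1 as implying the any-layout behaviour.
 */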
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
	unregister_virtio_driver(&virtio_net_driver);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");