1 /* 2 * Copyright (c) 2009, Microsoft Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, see <http://www.gnu.org/licenses/>. 15 * 16 * Authors: 17 * Haiyang Zhang <haiyangz@microsoft.com> 18 * Hank Janssen <hjanssen@microsoft.com> 19 */ 20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 21 22 #include <linux/init.h> 23 #include <linux/atomic.h> 24 #include <linux/module.h> 25 #include <linux/highmem.h> 26 #include <linux/device.h> 27 #include <linux/io.h> 28 #include <linux/delay.h> 29 #include <linux/netdevice.h> 30 #include <linux/inetdevice.h> 31 #include <linux/etherdevice.h> 32 #include <linux/skbuff.h> 33 #include <linux/if_vlan.h> 34 #include <linux/in.h> 35 #include <linux/slab.h> 36 #include <linux/rtnetlink.h> 37 #include <linux/netpoll.h> 38 #include <linux/reciprocal_div.h> 39 40 #include <net/arp.h> 41 #include <net/route.h> 42 #include <net/sock.h> 43 #include <net/pkt_sched.h> 44 #include <net/checksum.h> 45 #include <net/ip6_checksum.h> 46 47 #include "hyperv_net.h" 48 49 #define RING_SIZE_MIN 64 50 #define RETRY_US_LO 5000 51 #define RETRY_US_HI 10000 52 #define RETRY_MAX 2000 /* >10 sec */ 53 54 #define LINKCHANGE_INT (2 * HZ) 55 #define VF_TAKEOVER_INT (HZ / 10) 56 57 static unsigned int ring_size __ro_after_init = 128; 58 module_param(ring_size, uint, 0444); 59 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 60 unsigned int netvsc_ring_bytes __ro_after_init; 61 struct reciprocal_value netvsc_ring_reciprocal __ro_after_init; 62 63 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 64 NETIF_MSG_LINK | NETIF_MSG_IFUP | 65 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | 66 NETIF_MSG_TX_ERR; 67 68 static int debug = -1; 69 module_param(debug, int, 0444); 70 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 71 72 static void netvsc_change_rx_flags(struct net_device *net, int change) 73 { 74 struct net_device_context *ndev_ctx = netdev_priv(net); 75 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 76 int inc; 77 78 if (!vf_netdev) 79 return; 80 81 if (change & IFF_PROMISC) { 82 inc = (net->flags & IFF_PROMISC) ? 1 : -1; 83 dev_set_promiscuity(vf_netdev, inc); 84 } 85 86 if (change & IFF_ALLMULTI) { 87 inc = (net->flags & IFF_ALLMULTI) ? 
1 : -1; 88 dev_set_allmulti(vf_netdev, inc); 89 } 90 } 91 92 static void netvsc_set_rx_mode(struct net_device *net) 93 { 94 struct net_device_context *ndev_ctx = netdev_priv(net); 95 struct net_device *vf_netdev; 96 struct netvsc_device *nvdev; 97 98 rcu_read_lock(); 99 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); 100 if (vf_netdev) { 101 dev_uc_sync(vf_netdev, net); 102 dev_mc_sync(vf_netdev, net); 103 } 104 105 nvdev = rcu_dereference(ndev_ctx->nvdev); 106 if (nvdev) 107 rndis_filter_update(nvdev); 108 rcu_read_unlock(); 109 } 110 111 static int netvsc_open(struct net_device *net) 112 { 113 struct net_device_context *ndev_ctx = netdev_priv(net); 114 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 115 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); 116 struct rndis_device *rdev; 117 int ret = 0; 118 119 netif_carrier_off(net); 120 121 /* Open up the device */ 122 ret = rndis_filter_open(nvdev); 123 if (ret != 0) { 124 netdev_err(net, "unable to open device (ret %d).\n", ret); 125 return ret; 126 } 127 128 rdev = nvdev->extension; 129 if (!rdev->link_state) 130 netif_carrier_on(net); 131 132 if (vf_netdev) { 133 /* Setting synthetic device up transparently sets 134 * slave as up. If open fails, then the slave will 135 * still be offline (and not used). 136 */ 137 ret = dev_open(vf_netdev); 138 if (ret) 139 netdev_warn(net, 140 "unable to open slave: %s: %d\n", 141 vf_netdev->name, ret); 142 } 143 return 0; 144 } 145 146 static int netvsc_wait_until_empty(struct netvsc_device *nvdev) 147 { 148 unsigned int retry = 0; 149 int i; 150 151 /* Ensure pending bytes in ring are read */ 152 for (;;) { 153 u32 aread = 0; 154 155 for (i = 0; i < nvdev->num_chn; i++) { 156 struct vmbus_channel *chn 157 = nvdev->chan_table[i].channel; 158 159 if (!chn) 160 continue; 161 162 /* make sure receive not running now */ 163 napi_synchronize(&nvdev->chan_table[i].napi); 164 165 aread = hv_get_bytes_to_read(&chn->inbound); 166 if (aread) 167 break; 168 169 aread = hv_get_bytes_to_read(&chn->outbound); 170 if (aread) 171 break; 172 } 173 174 if (aread == 0) 175 return 0; 176 177 if (++retry > RETRY_MAX) 178 return -ETIMEDOUT; 179 180 usleep_range(RETRY_US_LO, RETRY_US_HI); 181 } 182 } 183 184 static int netvsc_close(struct net_device *net) 185 { 186 struct net_device_context *net_device_ctx = netdev_priv(net); 187 struct net_device *vf_netdev 188 = rtnl_dereference(net_device_ctx->vf_netdev); 189 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 190 int ret; 191 192 netif_tx_disable(net); 193 194 /* No need to close rndis filter if it is removed already */ 195 if (!nvdev) 196 return 0; 197 198 ret = rndis_filter_close(nvdev); 199 if (ret != 0) { 200 netdev_err(net, "unable to close device (ret %d).\n", ret); 201 return ret; 202 } 203 204 ret = netvsc_wait_until_empty(nvdev); 205 if (ret) 206 netdev_err(net, "Ring buffer not empty after closing rndis\n"); 207 208 if (vf_netdev) 209 dev_close(vf_netdev); 210 211 return ret; 212 } 213 214 static inline void *init_ppi_data(struct rndis_message *msg, 215 u32 ppi_size, u32 pkt_type) 216 { 217 struct rndis_packet *rndis_pkt = &msg->msg.pkt; 218 struct rndis_per_packet_info *ppi; 219 220 rndis_pkt->data_offset += ppi_size; 221 ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset 222 + rndis_pkt->per_pkt_info_len; 223 224 ppi->size = ppi_size; 225 ppi->type = pkt_type; 226 ppi->ppi_offset = sizeof(struct rndis_per_packet_info); 227 228 rndis_pkt->per_pkt_info_len += ppi_size; 229 230 return ppi + 1; 231
} 232 233 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented 234 * packets. We can use ethtool to change UDP hash level when necessary. 235 */ 236 static inline u32 netvsc_get_hash( 237 struct sk_buff *skb, 238 const struct net_device_context *ndc) 239 { 240 struct flow_keys flow; 241 u32 hash, pkt_proto = 0; 242 static u32 hashrnd __read_mostly; 243 244 net_get_random_once(&hashrnd, sizeof(hashrnd)); 245 246 if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) 247 return 0; 248 249 switch (flow.basic.ip_proto) { 250 case IPPROTO_TCP: 251 if (flow.basic.n_proto == htons(ETH_P_IP)) 252 pkt_proto = HV_TCP4_L4HASH; 253 else if (flow.basic.n_proto == htons(ETH_P_IPV6)) 254 pkt_proto = HV_TCP6_L4HASH; 255 256 break; 257 258 case IPPROTO_UDP: 259 if (flow.basic.n_proto == htons(ETH_P_IP)) 260 pkt_proto = HV_UDP4_L4HASH; 261 else if (flow.basic.n_proto == htons(ETH_P_IPV6)) 262 pkt_proto = HV_UDP6_L4HASH; 263 264 break; 265 } 266 267 if (pkt_proto & ndc->l4_hash) { 268 return skb_get_hash(skb); 269 } else { 270 if (flow.basic.n_proto == htons(ETH_P_IP)) 271 hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd); 272 else if (flow.basic.n_proto == htons(ETH_P_IPV6)) 273 hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); 274 else 275 hash = 0; 276 277 skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); 278 } 279 280 return hash; 281 } 282 283 static inline int netvsc_get_tx_queue(struct net_device *ndev, 284 struct sk_buff *skb, int old_idx) 285 { 286 const struct net_device_context *ndc = netdev_priv(ndev); 287 struct sock *sk = skb->sk; 288 int q_idx; 289 290 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & 291 (VRSS_SEND_TAB_SIZE - 1)]; 292 293 /* If queue index changed record the new value */ 294 if (q_idx != old_idx && 295 sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) 296 sk_tx_queue_set(sk, q_idx); 297 298 return q_idx; 299 } 300 301 /* 302 * Select queue for transmit. 303 * 304 * If a valid queue has already been assigned, then use that. 305 * Otherwise compute tx queue based on hash and the send table. 306 * 307 * This is basically similar to default (__netdev_pick_tx) with the added step 308 * of using the host send_table when no other queue has been assigned. 309 * 310 * TODO support XPS - but get_xps_queue not exported 311 */ 312 static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) 313 { 314 int q_idx = sk_tx_queue_get(skb->sk); 315 316 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { 317 /* If forwarding a packet, we use the recorded queue when 318 * available for better cache locality. 319 */ 320 if (skb_rx_queue_recorded(skb)) 321 q_idx = skb_get_rx_queue(skb); 322 else 323 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); 324 } 325 326 return q_idx; 327 } 328 329 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, 330 void *accel_priv, 331 select_queue_fallback_t fallback) 332 { 333 struct net_device_context *ndc = netdev_priv(ndev); 334 struct net_device *vf_netdev; 335 u16 txq; 336 337 rcu_read_lock(); 338 vf_netdev = rcu_dereference(ndc->vf_netdev); 339 if (vf_netdev) { 340 const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; 341 342 if (vf_ops->ndo_select_queue) 343 txq = vf_ops->ndo_select_queue(vf_netdev, skb, 344 accel_priv, fallback); 345 else 346 txq = fallback(vf_netdev, skb); 347 348 /* Record the queue selected by VF so that it can be 349 * used for common case where VF has more queues than 350 * the synthetic device. 
351 */ 352 qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; 353 } else { 354 txq = netvsc_pick_tx(ndev, skb); 355 } 356 rcu_read_unlock(); 357 358 while (unlikely(txq >= ndev->real_num_tx_queues)) 359 txq -= ndev->real_num_tx_queues; 360 361 return txq; 362 } 363 364 static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, 365 struct hv_page_buffer *pb) 366 { 367 int j = 0; 368 369 /* Deal with compound pages by ignoring unused part 370 * of the page. 371 */ 372 page += (offset >> PAGE_SHIFT); 373 offset &= ~PAGE_MASK; 374 375 while (len > 0) { 376 unsigned long bytes; 377 378 bytes = PAGE_SIZE - offset; 379 if (bytes > len) 380 bytes = len; 381 pb[j].pfn = page_to_pfn(page); 382 pb[j].offset = offset; 383 pb[j].len = bytes; 384 385 offset += bytes; 386 len -= bytes; 387 388 if (offset == PAGE_SIZE && len) { 389 page++; 390 offset = 0; 391 j++; 392 } 393 } 394 395 return j + 1; 396 } 397 398 static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, 399 struct hv_netvsc_packet *packet, 400 struct hv_page_buffer *pb) 401 { 402 u32 slots_used = 0; 403 char *data = skb->data; 404 int frags = skb_shinfo(skb)->nr_frags; 405 int i; 406 407 /* The packet is laid out thus: 408 * 1. hdr: RNDIS header and PPI 409 * 2. skb linear data 410 * 3. skb fragment data 411 */ 412 slots_used += fill_pg_buf(virt_to_page(hdr), 413 offset_in_page(hdr), 414 len, &pb[slots_used]); 415 416 packet->rmsg_size = len; 417 packet->rmsg_pgcnt = slots_used; 418 419 slots_used += fill_pg_buf(virt_to_page(data), 420 offset_in_page(data), 421 skb_headlen(skb), &pb[slots_used]); 422 423 for (i = 0; i < frags; i++) { 424 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 425 426 slots_used += fill_pg_buf(skb_frag_page(frag), 427 frag->page_offset, 428 skb_frag_size(frag), &pb[slots_used]); 429 } 430 return slots_used; 431 } 432 433 static int count_skb_frag_slots(struct sk_buff *skb) 434 { 435 int i, frags = skb_shinfo(skb)->nr_frags; 436 int pages = 0; 437 438 for (i = 0; i < frags; i++) { 439 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 440 unsigned long size = skb_frag_size(frag); 441 unsigned long offset = frag->page_offset; 442 443 /* Skip unused frames from start of page */ 444 offset &= ~PAGE_MASK; 445 pages += PFN_UP(offset + size); 446 } 447 return pages; 448 } 449 450 static int netvsc_get_slots(struct sk_buff *skb) 451 { 452 char *data = skb->data; 453 unsigned int offset = offset_in_page(data); 454 unsigned int len = skb_headlen(skb); 455 int slots; 456 int frag_slots; 457 458 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE); 459 frag_slots = count_skb_frag_slots(skb); 460 return slots + frag_slots; 461 } 462 463 static u32 net_checksum_info(struct sk_buff *skb) 464 { 465 if (skb->protocol == htons(ETH_P_IP)) { 466 struct iphdr *ip = ip_hdr(skb); 467 468 if (ip->protocol == IPPROTO_TCP) 469 return TRANSPORT_INFO_IPV4_TCP; 470 else if (ip->protocol == IPPROTO_UDP) 471 return TRANSPORT_INFO_IPV4_UDP; 472 } else { 473 struct ipv6hdr *ip6 = ipv6_hdr(skb); 474 475 if (ip6->nexthdr == IPPROTO_TCP) 476 return TRANSPORT_INFO_IPV6_TCP; 477 else if (ip6->nexthdr == IPPROTO_UDP) 478 return TRANSPORT_INFO_IPV6_UDP; 479 } 480 481 return TRANSPORT_INFO_NOT_IP; 482 } 483 484 /* Send skb on the slave VF device.
*/ 485 static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, 486 struct sk_buff *skb) 487 { 488 struct net_device_context *ndev_ctx = netdev_priv(net); 489 unsigned int len = skb->len; 490 int rc; 491 492 skb->dev = vf_netdev; 493 skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; 494 495 rc = dev_queue_xmit(skb); 496 if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { 497 struct netvsc_vf_pcpu_stats *pcpu_stats 498 = this_cpu_ptr(ndev_ctx->vf_stats); 499 500 u64_stats_update_begin(&pcpu_stats->syncp); 501 pcpu_stats->tx_packets++; 502 pcpu_stats->tx_bytes += len; 503 u64_stats_update_end(&pcpu_stats->syncp); 504 } else { 505 this_cpu_inc(ndev_ctx->vf_stats->tx_dropped); 506 } 507 508 return rc; 509 } 510 511 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) 512 { 513 struct net_device_context *net_device_ctx = netdev_priv(net); 514 struct hv_netvsc_packet *packet = NULL; 515 int ret; 516 unsigned int num_data_pgs; 517 struct rndis_message *rndis_msg; 518 struct net_device *vf_netdev; 519 u32 rndis_msg_size; 520 u32 hash; 521 struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; 522 523 /* if VF is present and up then redirect packets 524 * already called with rcu_read_lock_bh 525 */ 526 vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); 527 if (vf_netdev && netif_running(vf_netdev) && 528 !netpoll_tx_running(net)) 529 return netvsc_vf_xmit(net, vf_netdev, skb); 530 531 /* We will need at most two pages to describe the rndis 532 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number 533 * of pages in a single packet. If skb is scattered around 534 * more pages we try linearizing it. 535 */ 536 537 num_data_pgs = netvsc_get_slots(skb) + 2; 538 539 if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { 540 ++net_device_ctx->eth_stats.tx_scattered; 541 542 if (skb_linearize(skb)) 543 goto no_memory; 544 545 num_data_pgs = netvsc_get_slots(skb) + 2; 546 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { 547 ++net_device_ctx->eth_stats.tx_too_big; 548 goto drop; 549 } 550 } 551 552 /* 553 * Place the rndis header in the skb head room and 554 * the skb->cb will be used for hv_netvsc_packet 555 * structure.
556 */ 557 ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE); 558 if (ret) 559 goto no_memory; 560 561 /* Use the skb control buffer for building up the packet */ 562 BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > 563 FIELD_SIZEOF(struct sk_buff, cb)); 564 packet = (struct hv_netvsc_packet *)skb->cb; 565 566 packet->q_idx = skb_get_queue_mapping(skb); 567 568 packet->total_data_buflen = skb->len; 569 packet->total_bytes = skb->len; 570 packet->total_packets = 1; 571 572 rndis_msg = (struct rndis_message *)skb->head; 573 574 /* Add the rndis header */ 575 rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; 576 rndis_msg->msg_len = packet->total_data_buflen; 577 578 rndis_msg->msg.pkt = (struct rndis_packet) { 579 .data_offset = sizeof(struct rndis_packet), 580 .data_len = packet->total_data_buflen, 581 .per_pkt_info_offset = sizeof(struct rndis_packet), 582 }; 583 584 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet); 585 586 hash = skb_get_hash_raw(skb); 587 if (hash != 0 && net->real_num_tx_queues > 1) { 588 u32 *hash_info; 589 590 rndis_msg_size += NDIS_HASH_PPI_SIZE; 591 hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE, 592 NBL_HASH_VALUE); 593 *hash_info = hash; 594 } 595 596 if (skb_vlan_tag_present(skb)) { 597 struct ndis_pkt_8021q_info *vlan; 598 599 rndis_msg_size += NDIS_VLAN_PPI_SIZE; 600 vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, 601 IEEE_8021Q_INFO); 602 603 vlan->value = 0; 604 vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; 605 vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> 606 VLAN_PRIO_SHIFT; 607 } 608 609 if (skb_is_gso(skb)) { 610 struct ndis_tcp_lso_info *lso_info; 611 612 rndis_msg_size += NDIS_LSO_PPI_SIZE; 613 lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, 614 TCP_LARGESEND_PKTINFO); 615 616 lso_info->value = 0; 617 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; 618 if (skb->protocol == htons(ETH_P_IP)) { 619 lso_info->lso_v2_transmit.ip_version = 620 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; 621 ip_hdr(skb)->tot_len = 0; 622 ip_hdr(skb)->check = 0; 623 tcp_hdr(skb)->check = 624 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 625 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 626 } else { 627 lso_info->lso_v2_transmit.ip_version = 628 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; 629 ipv6_hdr(skb)->payload_len = 0; 630 tcp_hdr(skb)->check = 631 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 632 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 633 } 634 lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb); 635 lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; 636 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 637 if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) { 638 struct ndis_tcp_ip_checksum_info *csum_info; 639 640 rndis_msg_size += NDIS_CSUM_PPI_SIZE; 641 csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, 642 TCPIP_CHKSUM_PKTINFO); 643 644 csum_info->value = 0; 645 csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); 646 647 if (skb->protocol == htons(ETH_P_IP)) { 648 csum_info->transmit.is_ipv4 = 1; 649 650 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 651 csum_info->transmit.tcp_checksum = 1; 652 else 653 csum_info->transmit.udp_checksum = 1; 654 } else { 655 csum_info->transmit.is_ipv6 = 1; 656 657 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 658 csum_info->transmit.tcp_checksum = 1; 659 else 660 csum_info->transmit.udp_checksum = 1; 661 } 662 } else { 663 /* Can't do offload of this type of checksum */ 664 if (skb_checksum_help(skb)) 665 goto drop; 666 } 667 } 668 669 /* Start filling in the page buffers with 
the rndis hdr */ 670 rndis_msg->msg_len += rndis_msg_size; 671 packet->total_data_buflen = rndis_msg->msg_len; 672 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, 673 skb, packet, pb); 674 675 /* timestamp packet in software */ 676 skb_tx_timestamp(skb); 677 678 ret = netvsc_send(net, packet, rndis_msg, pb, skb); 679 if (likely(ret == 0)) 680 return NETDEV_TX_OK; 681 682 if (ret == -EAGAIN) { 683 ++net_device_ctx->eth_stats.tx_busy; 684 return NETDEV_TX_BUSY; 685 } 686 687 if (ret == -ENOSPC) 688 ++net_device_ctx->eth_stats.tx_no_space; 689 690 drop: 691 dev_kfree_skb_any(skb); 692 net->stats.tx_dropped++; 693 694 return NETDEV_TX_OK; 695 696 no_memory: 697 ++net_device_ctx->eth_stats.tx_no_memory; 698 goto drop; 699 } 700 701 /* 702 * netvsc_linkstatus_callback - Link up/down notification 703 */ 704 void netvsc_linkstatus_callback(struct net_device *net, 705 struct rndis_message *resp) 706 { 707 struct rndis_indicate_status *indicate = &resp->msg.indicate_status; 708 struct net_device_context *ndev_ctx = netdev_priv(net); 709 struct netvsc_reconfig *event; 710 unsigned long flags; 711 712 /* Update the physical link speed when changing to another vSwitch */ 713 if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { 714 u32 speed; 715 716 speed = *(u32 *)((void *)indicate 717 + indicate->status_buf_offset) / 10000; 718 ndev_ctx->speed = speed; 719 return; 720 } 721 722 /* Handle these link change statuses below */ 723 if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE && 724 indicate->status != RNDIS_STATUS_MEDIA_CONNECT && 725 indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT) 726 return; 727 728 if (net->reg_state != NETREG_REGISTERED) 729 return; 730 731 event = kzalloc(sizeof(*event), GFP_ATOMIC); 732 if (!event) 733 return; 734 event->event = indicate->status; 735 736 spin_lock_irqsave(&ndev_ctx->lock, flags); 737 list_add_tail(&event->list, &ndev_ctx->reconfig_events); 738 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 739 740 schedule_delayed_work(&ndev_ctx->dwork, 0); 741 } 742 743 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, 744 struct napi_struct *napi, 745 const struct ndis_tcp_ip_checksum_info *csum_info, 746 const struct ndis_pkt_8021q_info *vlan, 747 void *data, u32 buflen) 748 { 749 struct sk_buff *skb; 750 751 skb = napi_alloc_skb(napi, buflen); 752 if (!skb) 753 return skb; 754 755 /* 756 * Copy to skb. This copy is needed here since the memory pointed by 757 * hv_netvsc_packet cannot be deallocated 758 */ 759 skb_put_data(skb, data, buflen); 760 761 skb->protocol = eth_type_trans(skb, net); 762 763 /* skb is already created with CHECKSUM_NONE */ 764 skb_checksum_none_assert(skb); 765 766 /* 767 * In Linux, the IP checksum is always checked. 768 * Do L4 checksum offload if enabled and present. 769 */ 770 if (csum_info && (net->features & NETIF_F_RXCSUM)) { 771 if (csum_info->receive.tcp_checksum_succeeded || 772 csum_info->receive.udp_checksum_succeeded) 773 skb->ip_summed = CHECKSUM_UNNECESSARY; 774 } 775 776 if (vlan) { 777 u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT); 778 779 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 780 vlan_tci); 781 } 782 783 return skb; 784 } 785 786 /* 787 * netvsc_recv_callback - Callback when we receive a packet from the 788 * "wire" on the specified device. 
789 */ 790 int netvsc_recv_callback(struct net_device *net, 791 struct netvsc_device *net_device, 792 struct vmbus_channel *channel, 793 void *data, u32 len, 794 const struct ndis_tcp_ip_checksum_info *csum_info, 795 const struct ndis_pkt_8021q_info *vlan) 796 { 797 struct net_device_context *net_device_ctx = netdev_priv(net); 798 u16 q_idx = channel->offermsg.offer.sub_channel_index; 799 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; 800 struct sk_buff *skb; 801 struct netvsc_stats *rx_stats; 802 803 if (net->reg_state != NETREG_REGISTERED) 804 return NVSP_STAT_FAIL; 805 806 /* Allocate a skb - TODO direct I/O to pages? */ 807 skb = netvsc_alloc_recv_skb(net, &nvchan->napi, 808 csum_info, vlan, data, len); 809 if (unlikely(!skb)) { 810 ++net_device_ctx->eth_stats.rx_no_memory; 811 rcu_read_unlock(); 812 return NVSP_STAT_FAIL; 813 } 814 815 skb_record_rx_queue(skb, q_idx); 816 817 /* 818 * Even if injecting the packet, record the statistics 819 * on the synthetic device because modifying the VF device 820 * statistics will not work correctly. 821 */ 822 rx_stats = &nvchan->rx_stats; 823 u64_stats_update_begin(&rx_stats->syncp); 824 rx_stats->packets++; 825 rx_stats->bytes += len; 826 827 if (skb->pkt_type == PACKET_BROADCAST) 828 ++rx_stats->broadcast; 829 else if (skb->pkt_type == PACKET_MULTICAST) 830 ++rx_stats->multicast; 831 u64_stats_update_end(&rx_stats->syncp); 832 833 napi_gro_receive(&nvchan->napi, skb); 834 return NVSP_STAT_SUCCESS; 835 } 836 837 static void netvsc_get_drvinfo(struct net_device *net, 838 struct ethtool_drvinfo *info) 839 { 840 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 841 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); 842 } 843 844 static void netvsc_get_channels(struct net_device *net, 845 struct ethtool_channels *channel) 846 { 847 struct net_device_context *net_device_ctx = netdev_priv(net); 848 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 849 850 if (nvdev) { 851 channel->max_combined = nvdev->max_chn; 852 channel->combined_count = nvdev->num_chn; 853 } 854 } 855 856 static int netvsc_detach(struct net_device *ndev, 857 struct netvsc_device *nvdev) 858 { 859 struct net_device_context *ndev_ctx = netdev_priv(ndev); 860 struct hv_device *hdev = ndev_ctx->device_ctx; 861 int ret; 862 863 /* Don't try continuing to try and setup sub channels */ 864 if (cancel_work_sync(&nvdev->subchan_work)) 865 nvdev->num_chn = 1; 866 867 /* If device was up (receiving) then shutdown */ 868 if (netif_running(ndev)) { 869 netif_tx_disable(ndev); 870 871 ret = rndis_filter_close(nvdev); 872 if (ret) { 873 netdev_err(ndev, 874 "unable to close device (ret %d).\n", ret); 875 return ret; 876 } 877 878 ret = netvsc_wait_until_empty(nvdev); 879 if (ret) { 880 netdev_err(ndev, 881 "Ring buffer not empty after closing rndis\n"); 882 return ret; 883 } 884 } 885 886 netif_device_detach(ndev); 887 888 rndis_filter_device_remove(hdev, nvdev); 889 890 return 0; 891 } 892 893 static int netvsc_attach(struct net_device *ndev, 894 struct netvsc_device_info *dev_info) 895 { 896 struct net_device_context *ndev_ctx = netdev_priv(ndev); 897 struct hv_device *hdev = ndev_ctx->device_ctx; 898 struct netvsc_device *nvdev; 899 struct rndis_device *rdev; 900 int ret; 901 902 nvdev = rndis_filter_device_add(hdev, dev_info); 903 if (IS_ERR(nvdev)) 904 return PTR_ERR(nvdev); 905 906 /* Note: enable and attach happen when sub-channels setup */ 907 908 netif_carrier_off(ndev); 909 910 if (netif_running(ndev)) { 911 ret = 
rndis_filter_open(nvdev); 912 if (ret) 913 return ret; 914 915 rdev = nvdev->extension; 916 if (!rdev->link_state) 917 netif_carrier_on(ndev); 918 } 919 920 return 0; 921 } 922 923 static int netvsc_set_channels(struct net_device *net, 924 struct ethtool_channels *channels) 925 { 926 struct net_device_context *net_device_ctx = netdev_priv(net); 927 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 928 unsigned int orig, count = channels->combined_count; 929 struct netvsc_device_info device_info; 930 int ret; 931 932 /* We do not support separate count for rx, tx, or other */ 933 if (count == 0 || 934 channels->rx_count || channels->tx_count || channels->other_count) 935 return -EINVAL; 936 937 if (!nvdev || nvdev->destroy) 938 return -ENODEV; 939 940 if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) 941 return -EINVAL; 942 943 if (count > nvdev->max_chn) 944 return -EINVAL; 945 946 orig = nvdev->num_chn; 947 948 memset(&device_info, 0, sizeof(device_info)); 949 device_info.num_chn = count; 950 device_info.send_sections = nvdev->send_section_cnt; 951 device_info.send_section_size = nvdev->send_section_size; 952 device_info.recv_sections = nvdev->recv_section_cnt; 953 device_info.recv_section_size = nvdev->recv_section_size; 954 955 ret = netvsc_detach(net, nvdev); 956 if (ret) 957 return ret; 958 959 ret = netvsc_attach(net, &device_info); 960 if (ret) { 961 device_info.num_chn = orig; 962 if (netvsc_attach(net, &device_info)) 963 netdev_err(net, "restoring channel setting failed\n"); 964 } 965 966 return ret; 967 } 968 969 static bool 970 netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd) 971 { 972 struct ethtool_link_ksettings diff1 = *cmd; 973 struct ethtool_link_ksettings diff2 = {}; 974 975 diff1.base.speed = 0; 976 diff1.base.duplex = 0; 977 /* advertising and cmd are usually set */ 978 ethtool_link_ksettings_zero_link_mode(&diff1, advertising); 979 diff1.base.cmd = 0; 980 /* We set port to PORT_OTHER */ 981 diff2.base.port = PORT_OTHER; 982 983 return !memcmp(&diff1, &diff2, sizeof(diff1)); 984 } 985 986 static void netvsc_init_settings(struct net_device *dev) 987 { 988 struct net_device_context *ndc = netdev_priv(dev); 989 990 ndc->l4_hash = HV_DEFAULT_L4HASH; 991 992 ndc->speed = SPEED_UNKNOWN; 993 ndc->duplex = DUPLEX_FULL; 994 } 995 996 static int netvsc_get_link_ksettings(struct net_device *dev, 997 struct ethtool_link_ksettings *cmd) 998 { 999 struct net_device_context *ndc = netdev_priv(dev); 1000 1001 cmd->base.speed = ndc->speed; 1002 cmd->base.duplex = ndc->duplex; 1003 cmd->base.port = PORT_OTHER; 1004 1005 return 0; 1006 } 1007 1008 static int netvsc_set_link_ksettings(struct net_device *dev, 1009 const struct ethtool_link_ksettings *cmd) 1010 { 1011 struct net_device_context *ndc = netdev_priv(dev); 1012 u32 speed; 1013 1014 speed = cmd->base.speed; 1015 if (!ethtool_validate_speed(speed) || 1016 !ethtool_validate_duplex(cmd->base.duplex) || 1017 !netvsc_validate_ethtool_ss_cmd(cmd)) 1018 return -EINVAL; 1019 1020 ndc->speed = speed; 1021 ndc->duplex = cmd->base.duplex; 1022 1023 return 0; 1024 } 1025 1026 static int netvsc_change_mtu(struct net_device *ndev, int mtu) 1027 { 1028 struct net_device_context *ndevctx = netdev_priv(ndev); 1029 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1030 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1031 int orig_mtu = ndev->mtu; 1032 struct netvsc_device_info device_info; 1033 int ret = 0; 1034 1035 if (!nvdev || nvdev->destroy) 1036 return -ENODEV; 
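	/* MTU change sequence: the VF MTU is updated first, then the synthetic
	 * device is detached, ndev->mtu is changed, and the device is
	 * re-attached with its previous channel and section layout. On failure
	 * the original MTU is restored on both the synthetic device and the VF.
	 */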
1037 1038 /* Change MTU of underlying VF netdev first. */ 1039 if (vf_netdev) { 1040 ret = dev_set_mtu(vf_netdev, mtu); 1041 if (ret) 1042 return ret; 1043 } 1044 1045 memset(&device_info, 0, sizeof(device_info)); 1046 device_info.num_chn = nvdev->num_chn; 1047 device_info.send_sections = nvdev->send_section_cnt; 1048 device_info.send_section_size = nvdev->send_section_size; 1049 device_info.recv_sections = nvdev->recv_section_cnt; 1050 device_info.recv_section_size = nvdev->recv_section_size; 1051 1052 ret = netvsc_detach(ndev, nvdev); 1053 if (ret) 1054 goto rollback_vf; 1055 1056 ndev->mtu = mtu; 1057 1058 ret = netvsc_attach(ndev, &device_info); 1059 if (ret) 1060 goto rollback; 1061 1062 return 0; 1063 1064 rollback: 1065 /* Attempt rollback to original MTU */ 1066 ndev->mtu = orig_mtu; 1067 1068 if (netvsc_attach(ndev, &device_info)) 1069 netdev_err(ndev, "restoring mtu failed\n"); 1070 rollback_vf: 1071 if (vf_netdev) 1072 dev_set_mtu(vf_netdev, orig_mtu); 1073 1074 return ret; 1075 } 1076 1077 static void netvsc_get_vf_stats(struct net_device *net, 1078 struct netvsc_vf_pcpu_stats *tot) 1079 { 1080 struct net_device_context *ndev_ctx = netdev_priv(net); 1081 int i; 1082 1083 memset(tot, 0, sizeof(*tot)); 1084 1085 for_each_possible_cpu(i) { 1086 const struct netvsc_vf_pcpu_stats *stats 1087 = per_cpu_ptr(ndev_ctx->vf_stats, i); 1088 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1089 unsigned int start; 1090 1091 do { 1092 start = u64_stats_fetch_begin_irq(&stats->syncp); 1093 rx_packets = stats->rx_packets; 1094 tx_packets = stats->tx_packets; 1095 rx_bytes = stats->rx_bytes; 1096 tx_bytes = stats->tx_bytes; 1097 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 1098 1099 tot->rx_packets += rx_packets; 1100 tot->tx_packets += tx_packets; 1101 tot->rx_bytes += rx_bytes; 1102 tot->tx_bytes += tx_bytes; 1103 tot->tx_dropped += stats->tx_dropped; 1104 } 1105 } 1106 1107 static void netvsc_get_stats64(struct net_device *net, 1108 struct rtnl_link_stats64 *t) 1109 { 1110 struct net_device_context *ndev_ctx = netdev_priv(net); 1111 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); 1112 struct netvsc_vf_pcpu_stats vf_tot; 1113 int i; 1114 1115 if (!nvdev) 1116 return; 1117 1118 netdev_stats_to_stats64(t, &net->stats); 1119 1120 netvsc_get_vf_stats(net, &vf_tot); 1121 t->rx_packets += vf_tot.rx_packets; 1122 t->tx_packets += vf_tot.tx_packets; 1123 t->rx_bytes += vf_tot.rx_bytes; 1124 t->tx_bytes += vf_tot.tx_bytes; 1125 t->tx_dropped += vf_tot.tx_dropped; 1126 1127 for (i = 0; i < nvdev->num_chn; i++) { 1128 const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; 1129 const struct netvsc_stats *stats; 1130 u64 packets, bytes, multicast; 1131 unsigned int start; 1132 1133 stats = &nvchan->tx_stats; 1134 do { 1135 start = u64_stats_fetch_begin_irq(&stats->syncp); 1136 packets = stats->packets; 1137 bytes = stats->bytes; 1138 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 1139 1140 t->tx_bytes += bytes; 1141 t->tx_packets += packets; 1142 1143 stats = &nvchan->rx_stats; 1144 do { 1145 start = u64_stats_fetch_begin_irq(&stats->syncp); 1146 packets = stats->packets; 1147 bytes = stats->bytes; 1148 multicast = stats->multicast + stats->broadcast; 1149 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 1150 1151 t->rx_bytes += bytes; 1152 t->rx_packets += packets; 1153 t->multicast += multicast; 1154 } 1155 } 1156 1157 static int netvsc_set_mac_addr(struct net_device *ndev, void *p) 1158 { 1159 struct net_device_context *ndc = 
netdev_priv(ndev); 1160 struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); 1161 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1162 struct sockaddr *addr = p; 1163 int err; 1164 1165 err = eth_prepare_mac_addr_change(ndev, p); 1166 if (err) 1167 return err; 1168 1169 if (!nvdev) 1170 return -ENODEV; 1171 1172 if (vf_netdev) { 1173 err = dev_set_mac_address(vf_netdev, addr); 1174 if (err) 1175 return err; 1176 } 1177 1178 err = rndis_filter_set_device_mac(nvdev, addr->sa_data); 1179 if (!err) { 1180 eth_commit_mac_addr_change(ndev, p); 1181 } else if (vf_netdev) { 1182 /* rollback change on VF */ 1183 memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN); 1184 dev_set_mac_address(vf_netdev, addr); 1185 } 1186 1187 return err; 1188 } 1189 1190 static const struct { 1191 char name[ETH_GSTRING_LEN]; 1192 u16 offset; 1193 } netvsc_stats[] = { 1194 { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) }, 1195 { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, 1196 { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, 1197 { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, 1198 { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, 1199 { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) }, 1200 { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) }, 1201 { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, 1202 { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, 1203 { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, 1204 }, vf_stats[] = { 1205 { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, 1206 { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, 1207 { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) }, 1208 { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) }, 1209 { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) }, 1210 }; 1211 1212 #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) 1213 #define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) 1214 1215 /* 4 statistics per queue (rx/tx packets/bytes) */ 1216 #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) 1217 1218 static int netvsc_get_sset_count(struct net_device *dev, int string_set) 1219 { 1220 struct net_device_context *ndc = netdev_priv(dev); 1221 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1222 1223 if (!nvdev) 1224 return -ENODEV; 1225 1226 switch (string_set) { 1227 case ETH_SS_STATS: 1228 return NETVSC_GLOBAL_STATS_LEN 1229 + NETVSC_VF_STATS_LEN 1230 + NETVSC_QUEUE_STATS_LEN(nvdev); 1231 default: 1232 return -EINVAL; 1233 } 1234 } 1235 1236 static void netvsc_get_ethtool_stats(struct net_device *dev, 1237 struct ethtool_stats *stats, u64 *data) 1238 { 1239 struct net_device_context *ndc = netdev_priv(dev); 1240 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1241 const void *nds = &ndc->eth_stats; 1242 const struct netvsc_stats *qstats; 1243 struct netvsc_vf_pcpu_stats sum; 1244 unsigned int start; 1245 u64 packets, bytes; 1246 int i, j; 1247 1248 if (!nvdev) 1249 return; 1250 1251 for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) 1252 data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); 1253 1254 netvsc_get_vf_stats(dev, &sum); 1255 for (j = 0; j < NETVSC_VF_STATS_LEN; j++) 1256 data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); 1257 1258 for (j = 0; j < nvdev->num_chn; j++) { 1259 qstats = 
&nvdev->chan_table[j].tx_stats; 1260 1261 do { 1262 start = u64_stats_fetch_begin_irq(&qstats->syncp); 1263 packets = qstats->packets; 1264 bytes = qstats->bytes; 1265 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); 1266 data[i++] = packets; 1267 data[i++] = bytes; 1268 1269 qstats = &nvdev->chan_table[j].rx_stats; 1270 do { 1271 start = u64_stats_fetch_begin_irq(&qstats->syncp); 1272 packets = qstats->packets; 1273 bytes = qstats->bytes; 1274 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); 1275 data[i++] = packets; 1276 data[i++] = bytes; 1277 } 1278 } 1279 1280 static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) 1281 { 1282 struct net_device_context *ndc = netdev_priv(dev); 1283 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1284 u8 *p = data; 1285 int i; 1286 1287 if (!nvdev) 1288 return; 1289 1290 switch (stringset) { 1291 case ETH_SS_STATS: 1292 for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) { 1293 memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN); 1294 p += ETH_GSTRING_LEN; 1295 } 1296 1297 for (i = 0; i < ARRAY_SIZE(vf_stats); i++) { 1298 memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN); 1299 p += ETH_GSTRING_LEN; 1300 } 1301 1302 for (i = 0; i < nvdev->num_chn; i++) { 1303 sprintf(p, "tx_queue_%u_packets", i); 1304 p += ETH_GSTRING_LEN; 1305 sprintf(p, "tx_queue_%u_bytes", i); 1306 p += ETH_GSTRING_LEN; 1307 sprintf(p, "rx_queue_%u_packets", i); 1308 p += ETH_GSTRING_LEN; 1309 sprintf(p, "rx_queue_%u_bytes", i); 1310 p += ETH_GSTRING_LEN; 1311 } 1312 1313 break; 1314 } 1315 } 1316 1317 static int 1318 netvsc_get_rss_hash_opts(struct net_device_context *ndc, 1319 struct ethtool_rxnfc *info) 1320 { 1321 const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3; 1322 1323 info->data = RXH_IP_SRC | RXH_IP_DST; 1324 1325 switch (info->flow_type) { 1326 case TCP_V4_FLOW: 1327 if (ndc->l4_hash & HV_TCP4_L4HASH) 1328 info->data |= l4_flag; 1329 1330 break; 1331 1332 case TCP_V6_FLOW: 1333 if (ndc->l4_hash & HV_TCP6_L4HASH) 1334 info->data |= l4_flag; 1335 1336 break; 1337 1338 case UDP_V4_FLOW: 1339 if (ndc->l4_hash & HV_UDP4_L4HASH) 1340 info->data |= l4_flag; 1341 1342 break; 1343 1344 case UDP_V6_FLOW: 1345 if (ndc->l4_hash & HV_UDP6_L4HASH) 1346 info->data |= l4_flag; 1347 1348 break; 1349 1350 case IPV4_FLOW: 1351 case IPV6_FLOW: 1352 break; 1353 default: 1354 info->data = 0; 1355 break; 1356 } 1357 1358 return 0; 1359 } 1360 1361 static int 1362 netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 1363 u32 *rules) 1364 { 1365 struct net_device_context *ndc = netdev_priv(dev); 1366 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1367 1368 if (!nvdev) 1369 return -ENODEV; 1370 1371 switch (info->cmd) { 1372 case ETHTOOL_GRXRINGS: 1373 info->data = nvdev->num_chn; 1374 return 0; 1375 1376 case ETHTOOL_GRXFH: 1377 return netvsc_get_rss_hash_opts(ndc, info); 1378 } 1379 return -EOPNOTSUPP; 1380 } 1381 1382 static int netvsc_set_rss_hash_opts(struct net_device_context *ndc, 1383 struct ethtool_rxnfc *info) 1384 { 1385 if (info->data == (RXH_IP_SRC | RXH_IP_DST | 1386 RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 1387 switch (info->flow_type) { 1388 case TCP_V4_FLOW: 1389 ndc->l4_hash |= HV_TCP4_L4HASH; 1390 break; 1391 1392 case TCP_V6_FLOW: 1393 ndc->l4_hash |= HV_TCP6_L4HASH; 1394 break; 1395 1396 case UDP_V4_FLOW: 1397 ndc->l4_hash |= HV_UDP4_L4HASH; 1398 break; 1399 1400 case UDP_V6_FLOW: 1401 ndc->l4_hash |= HV_UDP6_L4HASH; 1402 break; 1403 1404 default: 1405 return -EOPNOTSUPP; 1406 } 1407 1408 return 0; 1409 } 1410 1411 
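	/* When only RXH_IP_SRC | RXH_IP_DST is requested (no L4 port bits),
	 * clear the matching flow type from the L4 hash mask so that
	 * netvsc_get_hash() falls back to an address-only (L3) hash for it.
	 */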
if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { 1412 switch (info->flow_type) { 1413 case TCP_V4_FLOW: 1414 ndc->l4_hash &= ~HV_TCP4_L4HASH; 1415 break; 1416 1417 case TCP_V6_FLOW: 1418 ndc->l4_hash &= ~HV_TCP6_L4HASH; 1419 break; 1420 1421 case UDP_V4_FLOW: 1422 ndc->l4_hash &= ~HV_UDP4_L4HASH; 1423 break; 1424 1425 case UDP_V6_FLOW: 1426 ndc->l4_hash &= ~HV_UDP6_L4HASH; 1427 break; 1428 1429 default: 1430 return -EOPNOTSUPP; 1431 } 1432 1433 return 0; 1434 } 1435 1436 return -EOPNOTSUPP; 1437 } 1438 1439 static int 1440 netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) 1441 { 1442 struct net_device_context *ndc = netdev_priv(ndev); 1443 1444 if (info->cmd == ETHTOOL_SRXFH) 1445 return netvsc_set_rss_hash_opts(ndc, info); 1446 1447 return -EOPNOTSUPP; 1448 } 1449 1450 #ifdef CONFIG_NET_POLL_CONTROLLER 1451 static void netvsc_poll_controller(struct net_device *dev) 1452 { 1453 struct net_device_context *ndc = netdev_priv(dev); 1454 struct netvsc_device *ndev; 1455 int i; 1456 1457 rcu_read_lock(); 1458 ndev = rcu_dereference(ndc->nvdev); 1459 if (ndev) { 1460 for (i = 0; i < ndev->num_chn; i++) { 1461 struct netvsc_channel *nvchan = &ndev->chan_table[i]; 1462 1463 napi_schedule(&nvchan->napi); 1464 } 1465 } 1466 rcu_read_unlock(); 1467 } 1468 #endif 1469 1470 static u32 netvsc_get_rxfh_key_size(struct net_device *dev) 1471 { 1472 return NETVSC_HASH_KEYLEN; 1473 } 1474 1475 static u32 netvsc_rss_indir_size(struct net_device *dev) 1476 { 1477 return ITAB_NUM; 1478 } 1479 1480 static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 1481 u8 *hfunc) 1482 { 1483 struct net_device_context *ndc = netdev_priv(dev); 1484 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); 1485 struct rndis_device *rndis_dev; 1486 int i; 1487 1488 if (!ndev) 1489 return -ENODEV; 1490 1491 if (hfunc) 1492 *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ 1493 1494 rndis_dev = ndev->extension; 1495 if (indir) { 1496 for (i = 0; i < ITAB_NUM; i++) 1497 indir[i] = rndis_dev->rx_table[i]; 1498 } 1499 1500 if (key) 1501 memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); 1502 1503 return 0; 1504 } 1505 1506 static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, 1507 const u8 *key, const u8 hfunc) 1508 { 1509 struct net_device_context *ndc = netdev_priv(dev); 1510 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); 1511 struct rndis_device *rndis_dev; 1512 int i; 1513 1514 if (!ndev) 1515 return -ENODEV; 1516 1517 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 1518 return -EOPNOTSUPP; 1519 1520 rndis_dev = ndev->extension; 1521 if (indir) { 1522 for (i = 0; i < ITAB_NUM; i++) 1523 if (indir[i] >= ndev->num_chn) 1524 return -EINVAL; 1525 1526 for (i = 0; i < ITAB_NUM; i++) 1527 rndis_dev->rx_table[i] = indir[i]; 1528 } 1529 1530 if (!key) { 1531 if (!indir) 1532 return 0; 1533 1534 key = rndis_dev->rss_key; 1535 } 1536 1537 return rndis_filter_set_rss_param(rndis_dev, key); 1538 } 1539 1540 /* Hyper-V RNDIS protocol does not have ring in the HW sense. 1541 * It does have pre-allocated receive area which is divided into sections. 
1542 */ 1543 static void __netvsc_get_ringparam(struct netvsc_device *nvdev, 1544 struct ethtool_ringparam *ring) 1545 { 1546 u32 max_buf_size; 1547 1548 ring->rx_pending = nvdev->recv_section_cnt; 1549 ring->tx_pending = nvdev->send_section_cnt; 1550 1551 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) 1552 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; 1553 else 1554 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; 1555 1556 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size; 1557 ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE 1558 / nvdev->send_section_size; 1559 } 1560 1561 static void netvsc_get_ringparam(struct net_device *ndev, 1562 struct ethtool_ringparam *ring) 1563 { 1564 struct net_device_context *ndevctx = netdev_priv(ndev); 1565 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1566 1567 if (!nvdev) 1568 return; 1569 1570 __netvsc_get_ringparam(nvdev, ring); 1571 } 1572 1573 static int netvsc_set_ringparam(struct net_device *ndev, 1574 struct ethtool_ringparam *ring) 1575 { 1576 struct net_device_context *ndevctx = netdev_priv(ndev); 1577 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1578 struct netvsc_device_info device_info; 1579 struct ethtool_ringparam orig; 1580 u32 new_tx, new_rx; 1581 int ret = 0; 1582 1583 if (!nvdev || nvdev->destroy) 1584 return -ENODEV; 1585 1586 memset(&orig, 0, sizeof(orig)); 1587 __netvsc_get_ringparam(nvdev, &orig); 1588 1589 new_tx = clamp_t(u32, ring->tx_pending, 1590 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending); 1591 new_rx = clamp_t(u32, ring->rx_pending, 1592 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending); 1593 1594 if (new_tx == orig.tx_pending && 1595 new_rx == orig.rx_pending) 1596 return 0; /* no change */ 1597 1598 memset(&device_info, 0, sizeof(device_info)); 1599 device_info.num_chn = nvdev->num_chn; 1600 device_info.send_sections = new_tx; 1601 device_info.send_section_size = nvdev->send_section_size; 1602 device_info.recv_sections = new_rx; 1603 device_info.recv_section_size = nvdev->recv_section_size; 1604 1605 ret = netvsc_detach(ndev, nvdev); 1606 if (ret) 1607 return ret; 1608 1609 ret = netvsc_attach(ndev, &device_info); 1610 if (ret) { 1611 device_info.send_sections = orig.tx_pending; 1612 device_info.recv_sections = orig.rx_pending; 1613 1614 if (netvsc_attach(ndev, &device_info)) 1615 netdev_err(ndev, "restoring ringparam failed"); 1616 } 1617 1618 return ret; 1619 } 1620 1621 static const struct ethtool_ops ethtool_ops = { 1622 .get_drvinfo = netvsc_get_drvinfo, 1623 .get_link = ethtool_op_get_link, 1624 .get_ethtool_stats = netvsc_get_ethtool_stats, 1625 .get_sset_count = netvsc_get_sset_count, 1626 .get_strings = netvsc_get_strings, 1627 .get_channels = netvsc_get_channels, 1628 .set_channels = netvsc_set_channels, 1629 .get_ts_info = ethtool_op_get_ts_info, 1630 .get_rxnfc = netvsc_get_rxnfc, 1631 .set_rxnfc = netvsc_set_rxnfc, 1632 .get_rxfh_key_size = netvsc_get_rxfh_key_size, 1633 .get_rxfh_indir_size = netvsc_rss_indir_size, 1634 .get_rxfh = netvsc_get_rxfh, 1635 .set_rxfh = netvsc_set_rxfh, 1636 .get_link_ksettings = netvsc_get_link_ksettings, 1637 .set_link_ksettings = netvsc_set_link_ksettings, 1638 .get_ringparam = netvsc_get_ringparam, 1639 .set_ringparam = netvsc_set_ringparam, 1640 }; 1641 1642 static const struct net_device_ops device_ops = { 1643 .ndo_open = netvsc_open, 1644 .ndo_stop = netvsc_close, 1645 .ndo_start_xmit = netvsc_start_xmit, 1646 .ndo_change_rx_flags = netvsc_change_rx_flags, 1647 .ndo_set_rx_mode = netvsc_set_rx_mode, 1648 .ndo_change_mtu = 
netvsc_change_mtu, 1649 .ndo_validate_addr = eth_validate_addr, 1650 .ndo_set_mac_address = netvsc_set_mac_addr, 1651 .ndo_select_queue = netvsc_select_queue, 1652 .ndo_get_stats64 = netvsc_get_stats64, 1653 #ifdef CONFIG_NET_POLL_CONTROLLER 1654 .ndo_poll_controller = netvsc_poll_controller, 1655 #endif 1656 }; 1657 1658 /* 1659 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link 1660 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is 1661 * present send GARP packet to network peers with netif_notify_peers(). 1662 */ 1663 static void netvsc_link_change(struct work_struct *w) 1664 { 1665 struct net_device_context *ndev_ctx = 1666 container_of(w, struct net_device_context, dwork.work); 1667 struct hv_device *device_obj = ndev_ctx->device_ctx; 1668 struct net_device *net = hv_get_drvdata(device_obj); 1669 struct netvsc_device *net_device; 1670 struct rndis_device *rdev; 1671 struct netvsc_reconfig *event = NULL; 1672 bool notify = false, reschedule = false; 1673 unsigned long flags, next_reconfig, delay; 1674 1675 /* if changes are happening, come back later */ 1676 if (!rtnl_trylock()) { 1677 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); 1678 return; 1679 } 1680 1681 net_device = rtnl_dereference(ndev_ctx->nvdev); 1682 if (!net_device) 1683 goto out_unlock; 1684 1685 rdev = net_device->extension; 1686 1687 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; 1688 if (time_is_after_jiffies(next_reconfig)) { 1689 /* link_watch only sends one notification with current state 1690 * per second, avoid doing reconfig more frequently. Handle 1691 * wrap around. 1692 */ 1693 delay = next_reconfig - jiffies; 1694 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT; 1695 schedule_delayed_work(&ndev_ctx->dwork, delay); 1696 goto out_unlock; 1697 } 1698 ndev_ctx->last_reconfig = jiffies; 1699 1700 spin_lock_irqsave(&ndev_ctx->lock, flags); 1701 if (!list_empty(&ndev_ctx->reconfig_events)) { 1702 event = list_first_entry(&ndev_ctx->reconfig_events, 1703 struct netvsc_reconfig, list); 1704 list_del(&event->list); 1705 reschedule = !list_empty(&ndev_ctx->reconfig_events); 1706 } 1707 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 1708 1709 if (!event) 1710 goto out_unlock; 1711 1712 switch (event->event) { 1713 /* Only the following events are possible due to the check in 1714 * netvsc_linkstatus_callback() 1715 */ 1716 case RNDIS_STATUS_MEDIA_CONNECT: 1717 if (rdev->link_state) { 1718 rdev->link_state = false; 1719 netif_carrier_on(net); 1720 netif_tx_wake_all_queues(net); 1721 } else { 1722 notify = true; 1723 } 1724 kfree(event); 1725 break; 1726 case RNDIS_STATUS_MEDIA_DISCONNECT: 1727 if (!rdev->link_state) { 1728 rdev->link_state = true; 1729 netif_carrier_off(net); 1730 netif_tx_stop_all_queues(net); 1731 } 1732 kfree(event); 1733 break; 1734 case RNDIS_STATUS_NETWORK_CHANGE: 1735 /* Only makes sense if carrier is present */ 1736 if (!rdev->link_state) { 1737 rdev->link_state = true; 1738 netif_carrier_off(net); 1739 netif_tx_stop_all_queues(net); 1740 event->event = RNDIS_STATUS_MEDIA_CONNECT; 1741 spin_lock_irqsave(&ndev_ctx->lock, flags); 1742 list_add(&event->list, &ndev_ctx->reconfig_events); 1743 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 1744 reschedule = true; 1745 } 1746 break; 1747 } 1748 1749 rtnl_unlock(); 1750 1751 if (notify) 1752 netdev_notify_peers(net); 1753 1754 /* link_watch only sends one notification with current state per 1755 * second, handle next reconfig event in 2 seconds.
1756 */ 1757 if (reschedule) 1758 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); 1759 1760 return; 1761 1762 out_unlock: 1763 rtnl_unlock(); 1764 } 1765 1766 static struct net_device *get_netvsc_bymac(const u8 *mac) 1767 { 1768 struct net_device *dev; 1769 1770 ASSERT_RTNL(); 1771 1772 for_each_netdev(&init_net, dev) { 1773 if (dev->netdev_ops != &device_ops) 1774 continue; /* not a netvsc device */ 1775 1776 if (ether_addr_equal(mac, dev->perm_addr)) 1777 return dev; 1778 } 1779 1780 return NULL; 1781 } 1782 1783 static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) 1784 { 1785 struct net_device *dev; 1786 1787 ASSERT_RTNL(); 1788 1789 for_each_netdev(&init_net, dev) { 1790 struct net_device_context *net_device_ctx; 1791 1792 if (dev->netdev_ops != &device_ops) 1793 continue; /* not a netvsc device */ 1794 1795 net_device_ctx = netdev_priv(dev); 1796 if (!rtnl_dereference(net_device_ctx->nvdev)) 1797 continue; /* device is removed */ 1798 1799 if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) 1800 return dev; /* a match */ 1801 } 1802 1803 return NULL; 1804 } 1805 1806 /* Called when VF is injecting data into network stack. 1807 * Change the associated network device from VF to netvsc. 1808 * note: already called with rcu_read_lock 1809 */ 1810 static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) 1811 { 1812 struct sk_buff *skb = *pskb; 1813 struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); 1814 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1815 struct netvsc_vf_pcpu_stats *pcpu_stats 1816 = this_cpu_ptr(ndev_ctx->vf_stats); 1817 1818 skb->dev = ndev; 1819 1820 u64_stats_update_begin(&pcpu_stats->syncp); 1821 pcpu_stats->rx_packets++; 1822 pcpu_stats->rx_bytes += skb->len; 1823 u64_stats_update_end(&pcpu_stats->syncp); 1824 1825 return RX_HANDLER_ANOTHER; 1826 } 1827 1828 static int netvsc_vf_join(struct net_device *vf_netdev, 1829 struct net_device *ndev) 1830 { 1831 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1832 int ret; 1833 1834 ret = netdev_rx_handler_register(vf_netdev, 1835 netvsc_vf_handle_frame, ndev); 1836 if (ret != 0) { 1837 netdev_err(vf_netdev, 1838 "can not register netvsc VF receive handler (err = %d)\n", 1839 ret); 1840 goto rx_handler_failed; 1841 } 1842 1843 ret = netdev_upper_dev_link(vf_netdev, ndev, NULL); 1844 if (ret != 0) { 1845 netdev_err(vf_netdev, 1846 "can not set master device %s (err = %d)\n", 1847 ndev->name, ret); 1848 goto upper_link_failed; 1849 } 1850 1851 /* set slave flag before open to prevent IPv6 addrconf */ 1852 vf_netdev->flags |= IFF_SLAVE; 1853 1854 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); 1855 1856 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); 1857 1858 netdev_info(vf_netdev, "joined to %s\n", ndev->name); 1859 return 0; 1860 1861 upper_link_failed: 1862 netdev_rx_handler_unregister(vf_netdev); 1863 rx_handler_failed: 1864 return ret; 1865 } 1866 1867 static void __netvsc_vf_setup(struct net_device *ndev, 1868 struct net_device *vf_netdev) 1869 { 1870 int ret; 1871 1872 /* Align MTU of VF with master */ 1873 ret = dev_set_mtu(vf_netdev, ndev->mtu); 1874 if (ret) 1875 netdev_warn(vf_netdev, 1876 "unable to change mtu to %u\n", ndev->mtu); 1877 1878 /* set multicast etc flags on VF */ 1879 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); 1880 1881 /* sync address list from ndev to VF */ 1882 netif_addr_lock_bh(ndev); 1883 dev_uc_sync(vf_netdev, ndev); 1884 dev_mc_sync(vf_netdev, ndev); 1885 
netif_addr_unlock_bh(ndev); 1886 1887 if (netif_running(ndev)) { 1888 ret = dev_open(vf_netdev); 1889 if (ret) 1890 netdev_warn(vf_netdev, 1891 "unable to open: %d\n", ret); 1892 } 1893 } 1894 1895 /* Setup VF as slave of the synthetic device. 1896 * Runs in workqueue to avoid recursion in netlink callbacks. 1897 */ 1898 static void netvsc_vf_setup(struct work_struct *w) 1899 { 1900 struct net_device_context *ndev_ctx 1901 = container_of(w, struct net_device_context, vf_takeover.work); 1902 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); 1903 struct net_device *vf_netdev; 1904 1905 if (!rtnl_trylock()) { 1906 schedule_delayed_work(&ndev_ctx->vf_takeover, 0); 1907 return; 1908 } 1909 1910 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 1911 if (vf_netdev) 1912 __netvsc_vf_setup(ndev, vf_netdev); 1913 1914 rtnl_unlock(); 1915 } 1916 1917 static int netvsc_register_vf(struct net_device *vf_netdev) 1918 { 1919 struct net_device *ndev; 1920 struct net_device_context *net_device_ctx; 1921 struct netvsc_device *netvsc_dev; 1922 1923 if (vf_netdev->addr_len != ETH_ALEN) 1924 return NOTIFY_DONE; 1925 1926 /* 1927 * We will use the MAC address to locate the synthetic interface to 1928 * associate with the VF interface. If we don't find a matching 1929 * synthetic interface, move on. 1930 */ 1931 ndev = get_netvsc_bymac(vf_netdev->perm_addr); 1932 if (!ndev) 1933 return NOTIFY_DONE; 1934 1935 net_device_ctx = netdev_priv(ndev); 1936 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 1937 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) 1938 return NOTIFY_DONE; 1939 1940 if (netvsc_vf_join(vf_netdev, ndev) != 0) 1941 return NOTIFY_DONE; 1942 1943 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); 1944 1945 dev_hold(vf_netdev); 1946 rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); 1947 return NOTIFY_OK; 1948 } 1949 1950 /* VF up/down change detected, schedule to change data path */ 1951 static int netvsc_vf_changed(struct net_device *vf_netdev) 1952 { 1953 struct net_device_context *net_device_ctx; 1954 struct netvsc_device *netvsc_dev; 1955 struct net_device *ndev; 1956 bool vf_is_up = netif_running(vf_netdev); 1957 1958 ndev = get_netvsc_byref(vf_netdev); 1959 if (!ndev) 1960 return NOTIFY_DONE; 1961 1962 net_device_ctx = netdev_priv(ndev); 1963 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 1964 if (!netvsc_dev) 1965 return NOTIFY_DONE; 1966 1967 netvsc_switch_datapath(ndev, vf_is_up); 1968 netdev_info(ndev, "Data path switched %s VF: %s\n", 1969 vf_is_up ?
"to" : "from", vf_netdev->name); 1970 1971 return NOTIFY_OK; 1972 } 1973 1974 static int netvsc_unregister_vf(struct net_device *vf_netdev) 1975 { 1976 struct net_device *ndev; 1977 struct net_device_context *net_device_ctx; 1978 1979 ndev = get_netvsc_byref(vf_netdev); 1980 if (!ndev) 1981 return NOTIFY_DONE; 1982 1983 net_device_ctx = netdev_priv(ndev); 1984 cancel_delayed_work_sync(&net_device_ctx->vf_takeover); 1985 1986 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); 1987 1988 netdev_rx_handler_unregister(vf_netdev); 1989 netdev_upper_dev_unlink(vf_netdev, ndev); 1990 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); 1991 dev_put(vf_netdev); 1992 1993 return NOTIFY_OK; 1994 } 1995 1996 static int netvsc_probe(struct hv_device *dev, 1997 const struct hv_vmbus_device_id *dev_id) 1998 { 1999 struct net_device *net = NULL; 2000 struct net_device_context *net_device_ctx; 2001 struct netvsc_device_info device_info; 2002 struct netvsc_device *nvdev; 2003 int ret = -ENOMEM; 2004 2005 net = alloc_etherdev_mq(sizeof(struct net_device_context), 2006 VRSS_CHANNEL_MAX); 2007 if (!net) 2008 goto no_net; 2009 2010 netif_carrier_off(net); 2011 2012 netvsc_init_settings(net); 2013 2014 net_device_ctx = netdev_priv(net); 2015 net_device_ctx->device_ctx = dev; 2016 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); 2017 if (netif_msg_probe(net_device_ctx)) 2018 netdev_dbg(net, "netvsc msg_enable: %d\n", 2019 net_device_ctx->msg_enable); 2020 2021 hv_set_drvdata(dev, net); 2022 2023 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 2024 2025 spin_lock_init(&net_device_ctx->lock); 2026 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 2027 INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); 2028 2029 net_device_ctx->vf_stats 2030 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); 2031 if (!net_device_ctx->vf_stats) 2032 goto no_stats; 2033 2034 net->netdev_ops = &device_ops; 2035 net->ethtool_ops = &ethtool_ops; 2036 SET_NETDEV_DEV(net, &dev->device); 2037 2038 /* We always need headroom for rndis header */ 2039 net->needed_headroom = RNDIS_AND_PPI_SIZE; 2040 2041 /* Initialize the number of queues to be 1, we may change it if more 2042 * channels are offered later.
2043 */ 2044 netif_set_real_num_tx_queues(net, 1); 2045 netif_set_real_num_rx_queues(net, 1); 2046 2047 /* Notify the netvsc driver of the new device */ 2048 memset(&device_info, 0, sizeof(device_info)); 2049 device_info.num_chn = VRSS_CHANNEL_DEFAULT; 2050 device_info.send_sections = NETVSC_DEFAULT_TX; 2051 device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; 2052 device_info.recv_sections = NETVSC_DEFAULT_RX; 2053 device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; 2054 2055 nvdev = rndis_filter_device_add(dev, &device_info); 2056 if (IS_ERR(nvdev)) { 2057 ret = PTR_ERR(nvdev); 2058 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 2059 goto rndis_failed; 2060 } 2061 2062 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2063 2064 /* hw_features computed in rndis_netdev_set_hwcaps() */ 2065 net->features = net->hw_features | 2066 NETIF_F_HIGHDMA | NETIF_F_SG | 2067 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 2068 net->vlan_features = net->features; 2069 2070 netdev_lockdep_set_classes(net); 2071 2072 /* MTU range: 68 - 1500 or 65521 */ 2073 net->min_mtu = NETVSC_MTU_MIN; 2074 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) 2075 net->max_mtu = NETVSC_MTU - ETH_HLEN; 2076 else 2077 net->max_mtu = ETH_DATA_LEN; 2078 2079 ret = register_netdev(net); 2080 if (ret != 0) { 2081 pr_err("Unable to register netdev.\n"); 2082 goto register_failed; 2083 } 2084 2085 return ret; 2086 2087 register_failed: 2088 rndis_filter_device_remove(dev, nvdev); 2089 rndis_failed: 2090 free_percpu(net_device_ctx->vf_stats); 2091 no_stats: 2092 hv_set_drvdata(dev, NULL); 2093 free_netdev(net); 2094 no_net: 2095 return ret; 2096 } 2097 2098 static int netvsc_remove(struct hv_device *dev) 2099 { 2100 struct net_device_context *ndev_ctx; 2101 struct net_device *vf_netdev, *net; 2102 struct netvsc_device *nvdev; 2103 2104 net = hv_get_drvdata(dev); 2105 if (net == NULL) { 2106 dev_err(&dev->device, "No net device to remove\n"); 2107 return 0; 2108 } 2109 2110 ndev_ctx = netdev_priv(net); 2111 2112 cancel_delayed_work_sync(&ndev_ctx->dwork); 2113 2114 rcu_read_lock(); 2115 nvdev = rcu_dereference(ndev_ctx->nvdev); 2116 2117 if (nvdev) 2118 cancel_work_sync(&nvdev->subchan_work); 2119 2120 /* 2121 * Call to the vsc driver to let it know that the device is being 2122 * removed. Also blocks mtu and channel changes. 2123 */ 2124 rtnl_lock(); 2125 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2126 if (vf_netdev) 2127 netvsc_unregister_vf(vf_netdev); 2128 2129 if (nvdev) 2130 rndis_filter_device_remove(dev, nvdev); 2131 2132 unregister_netdevice(net); 2133 2134 rtnl_unlock(); 2135 rcu_read_unlock(); 2136 2137 hv_set_drvdata(dev, NULL); 2138 2139 free_percpu(ndev_ctx->vf_stats); 2140 free_netdev(net); 2141 return 0; 2142 } 2143 2144 static const struct hv_vmbus_device_id id_table[] = { 2145 /* Network guid */ 2146 { HV_NIC_GUID, }, 2147 { }, 2148 }; 2149 2150 MODULE_DEVICE_TABLE(vmbus, id_table); 2151 2152 /* The one and only one */ 2153 static struct hv_driver netvsc_drv = { 2154 .name = KBUILD_MODNAME, 2155 .id_table = id_table, 2156 .probe = netvsc_probe, 2157 .remove = netvsc_remove, 2158 }; 2159 2160 /* 2161 * On Hyper-V, every VF interface is matched with a corresponding 2162 * synthetic interface. The synthetic interface is presented first 2163 * to the guest. When the corresponding VF instance is registered, 2164 * we will take care of switching the data path. 
2165 */ 2166 static int netvsc_netdev_event(struct notifier_block *this, 2167 unsigned long event, void *ptr) 2168 { 2169 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 2170 2171 /* Skip our own events */ 2172 if (event_dev->netdev_ops == &device_ops) 2173 return NOTIFY_DONE; 2174 2175 /* Avoid non-Ethernet type devices */ 2176 if (event_dev->type != ARPHRD_ETHER) 2177 return NOTIFY_DONE; 2178 2179 /* Avoid Vlan dev with same MAC registering as VF */ 2180 if (is_vlan_dev(event_dev)) 2181 return NOTIFY_DONE; 2182 2183 /* Avoid Bonding master dev with same MAC registering as VF */ 2184 if ((event_dev->priv_flags & IFF_BONDING) && 2185 (event_dev->flags & IFF_MASTER)) 2186 return NOTIFY_DONE; 2187 2188 switch (event) { 2189 case NETDEV_REGISTER: 2190 return netvsc_register_vf(event_dev); 2191 case NETDEV_UNREGISTER: 2192 return netvsc_unregister_vf(event_dev); 2193 case NETDEV_UP: 2194 case NETDEV_DOWN: 2195 return netvsc_vf_changed(event_dev); 2196 default: 2197 return NOTIFY_DONE; 2198 } 2199 } 2200 2201 static struct notifier_block netvsc_netdev_notifier = { 2202 .notifier_call = netvsc_netdev_event, 2203 }; 2204 2205 static void __exit netvsc_drv_exit(void) 2206 { 2207 unregister_netdevice_notifier(&netvsc_netdev_notifier); 2208 vmbus_driver_unregister(&netvsc_drv); 2209 } 2210 2211 static int __init netvsc_drv_init(void) 2212 { 2213 int ret; 2214 2215 if (ring_size < RING_SIZE_MIN) { 2216 ring_size = RING_SIZE_MIN; 2217 pr_info("Increased ring_size to %u (min allowed)\n", 2218 ring_size); 2219 } 2220 netvsc_ring_bytes = ring_size * PAGE_SIZE; 2221 netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes); 2222 2223 ret = vmbus_driver_register(&netvsc_drv); 2224 if (ret) 2225 return ret; 2226 2227 register_netdevice_notifier(&netvsc_netdev_notifier); 2228 return 0; 2229 } 2230 2231 MODULE_LICENSE("GPL"); 2232 MODULE_DESCRIPTION("Microsoft Hyper-V network driver"); 2233 2234 module_init(netvsc_drv_init); 2235 module_exit(netvsc_drv_exit); 2236
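/* Usage notes (illustrative examples, not part of the driver):
 *
 * ring_size is read-only after load (perms 0444), so it has to be given at
 * module load time; assuming the module is built as hv_netvsc:
 *
 *	modprobe hv_netvsc ring_size=256
 *
 * The per-protocol L4 hash level handled by netvsc_get_rss_hash_opts() and
 * netvsc_set_rss_hash_opts() can be changed with ethtool, e.g. for UDP/IPv4
 * on an example interface eth0:
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn	(hash on addresses and ports)
 *	ethtool -N eth0 rx-flow-hash udp4 sd	(addresses only)
 */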