/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN		64
#define NETVSC_MIN_TX_SECTIONS	10
#define NETVSC_DEFAULT_TX	192	/* ~1M */
#define NETVSC_MIN_RX_SECTIONS	10	/* ~64K */
#define NETVSC_DEFAULT_RX	10485	/* Max ~16M */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	rndis_filter_update(nvdev);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;

	if (!rdev->link_state)
		netif_carrier_on(net);

	if (vf_netdev) {
		/* Setting synthetic device up transparently sets
		 * slave as up. If open fails, then slave will
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret = 0;
	u32 aread, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		goto out;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

out:
	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
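/* For illustration only (the interface name "eth0" is an example, not part of
 * this driver): the per-protocol UDP hash level can be toggled from user space
 * through ethtool's flow-hash interface, e.g.
 *
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn   # hash UDP/IPv4 on addrs + ports
 *   ethtool -N eth0 rx-flow-hash udp4 sd     # hash UDP/IPv4 on addresses only
 *
 * which reaches netvsc_set_rss_hash_opts() further below.
 */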
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	if (flow.basic.ip_proto == IPPROTO_TCP ||
	    (flow.basic.ip_proto == IPPROTO_UDP &&
	     ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) ||
	      (flow.basic.n_proto == htons(ETH_P_IPV6) &&
	       ndc->udp6_l4_hash)))) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) &
				   (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device.
 */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* if VF is present and up then redirect packets
	 * already called with rcu_read_lock_bh
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);

		vlan = (void *)ppi + ppi->ppi_offset;
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (void *)ppi + ppi->ppi_offset;

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	rcu_read_lock();
	net_device = rcu_dereference(net_device_ctx->nvdev);
	if (unlikely(!net_device))
		goto drop;

	nvchan = &net_device->chan_table[q_idx];

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
drop:
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	rcu_read_unlock();

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info device_info;
	bool was_opened;
	int ret = 0;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = count;
	device_info.ring_size = ring_size;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	rndis_filter_device_remove(dev, nvdev);

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		device_info.num_chn = orig;
		nvdev = rndis_filter_device_add(dev, &device_info);

		if (IS_ERR(nvdev)) {
			netdev_err(net, "restoring channel setting failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);

	/* We may have missed link change notifications */
	net_device_ctx->last_reconfig = 0;
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->udp4_l4_hash = true;
	ndc->udp6_l4_hash = true;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info device_info;
	bool was_opened;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			return ret;
	}

	netif_device_detach(ndev);
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	rndis_filter_device_remove(hdev, nvdev);

	ndev->mtu = mtu;

	nvdev = rndis_filter_device_add(hdev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);

		/* Attempt rollback to original MTU */
		ndev->mtu = orig_mtu;
		nvdev = rndis_filter_device_add(hdev, &device_info);

		if (vf_netdev)
			dev_set_mtu(vf_netdev, orig_mtu);

		if (IS_ERR(nvdev)) {
			netdev_err(ndev, "restoring mtu failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);

	netif_device_attach(ndev);

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes += vf_tot.rx_bytes;
	t->tx_bytes += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
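	/* Reader's note: the VF and per-queue counters filled in below are
	 * emitted in the same order as the names produced by
	 * netvsc_get_strings(); ethtool pairs values with names purely by
	 * position.
	 */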

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;

	case UDP_V4_FLOW:
		if (ndc->udp4_l4_hash)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

		break;

	case UDP_V6_FLOW:
		if (ndc->udp6_l4_hash)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		if (info->flow_type == UDP_V4_FLOW)
			ndc->udp4_l4_hash = true;
		else if (info->flow_type == UDP_V6_FLOW)
			ndc->udp6_l4_hash = true;
		else
			return -EOPNOTSUPP;

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		if (info->flow_type == UDP_V4_FLOW)
			ndc->udp4_l4_hash = false;
		else if (info->flow_type == UDP_V6_FLOW)
			ndc->udp6_l4_hash = false;
		else
			return -EOPNOTSUPP;

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev;
	int i;

	rcu_read_lock();
	ndev = rcu_dereference(ndc->nvdev);
	if (ndev) {
		for (i = 0; i < ndev->num_chn; i++) {
			struct netvsc_channel *nvchan = &ndev->chan_table[i];

			napi_schedule(&nvchan->napi);
		}
	}
	rcu_read_unlock();
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->ind_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->ind_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* Hyper-V RNDIS protocol does not have ring in the HW sense.
 * It does have pre-allocated receive area which is divided into sections.
 */
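/* As a usage note (the device name "eth0" below is illustrative only): with
 * this model,
 *   ethtool -G eth0 rx <N> tx <M>
 * changes the number of receive/send buffer sections negotiated with the
 * host, clamped to the limits computed in __netvsc_get_ringparam(), rather
 * than resizing a hardware descriptor ring.
 */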
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	bool was_opened;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.ring_size = ring_size;
	device_info.send_sections = new_tx;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = new_rx;
	device_info.recv_section_size = nvdev->recv_section_size;

	netif_device_detach(ndev);
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	rndis_filter_device_remove(hdev, nvdev);

	nvdev = rndis_filter_device_add(hdev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);

		device_info.send_sections = orig.tx_pending;
		device_info.recv_sections = orig.rx_pending;
		nvdev = rndis_filter_device_add(hdev, &device_info);
		if (IS_ERR(nvdev)) {
			netdev_err(ndev, "restoring ringparam failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);
	netif_device_attach(ndev);

	/* We may have missed link change notifications */
	ndevctx->last_reconfig = 0;
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = netvsc_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings = netvsc_get_strings,
	.get_channels = netvsc_get_channels,
	.set_channels = netvsc_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_rxnfc = netvsc_get_rxnfc,
	.set_rxnfc = netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh = netvsc_get_rxfh,
	.set_rxfh = netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam = netvsc_get_ringparam,
	.set_ringparam = netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open = netvsc_open,
	.ndo_stop = netvsc_close,
	.ndo_start_xmit = netvsc_start_xmit,
	.ndo_set_rx_mode = netvsc_set_multicast_list,
	.ndo_change_mtu = netvsc_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = netvsc_set_mac_addr,
	.ndo_select_queue = netvsc_select_queue,
	.ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (!rtnl_dereference(net_device_ctx->nvdev))
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		= this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_upper_dev_link(vf_netdev, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* VF up/down change detected, schedule to change data path */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	device_info.send_sections = NETVSC_DEFAULT_TX;
	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
	device_info.recv_sections = NETVSC_DEFAULT_RX;
	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_filter_device_add */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	return ret;

register_failed:
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev;
	struct net_device *net;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	netif_device_detach(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	rtnl_lock();
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	unregister_netdevice(net);

	rndis_filter_device_remove(dev,
				   rtnl_dereference(ndev_ctx->nvdev));
	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);