/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_start_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

union sub_key {
	u64 k;
	struct {
		u8 pad[3];
		u8 kb;
		u32 ka;
	};
};

/* Toeplitz hash function
 * data: network byte order
 * return: host byte order
 */
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
	union sub_key subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = ((u8 *)data)[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}

static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect(skb, &flow) ||
	    !(flow.n_proto == htons(ETH_P_IP) ||
	      flow.n_proto == htons(ETH_P_IPV6)))
		return false;

	if (flow.ip_proto == IPPROTO_TCP)
		data_len = 12;
	else
		data_len = 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	if (netvsc_set_hash(&hash, skb)) {
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return q_idx;
}

void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;

	if (!packet->part_of_skb)
		kfree(packet);

	if (skb)
		dev_kfree_skb_any(skb);
}

/* Describe [offset, offset + len) of a (possibly compound) page as page
 * buffer entries; returns the number of entries used.
 */
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet)
{
	struct hv_page_buffer *pb = packet->page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused space at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

/* Count how many page buffer slots the skb's linear data plus all
 * fragments will need.
 */
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	u32 hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	u32 head_room;
	u32 pkt_sz;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered over
	 * more pages we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	head_room = skb_headroom(skb);
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;

	if (head_room < pkt_sz) {
		packet = kmalloc(pkt_sz, GFP_ATOMIC);
		if (!packet) {
			/* out of memory, drop packet */
			netdev_err(net, "unable to alloc hv_netvsc_packet\n");
			ret = -ENOMEM;
			goto drop;
		}
		packet->part_of_skb = false;
	} else {
		/* Use the headroom for building up the packet */
		packet = (struct hv_netvsc_packet *)skb->head;
		packet->part_of_skb = true;
	}

	packet->status = 0;
	packet->xmit_more = skb->xmit_more;

	packet->vlan_tci = skb->vlan_tci;
	packet->page_buf = page_buf;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet));

	memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
			ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		net->stats.tx_bytes += skb_length;
		net->stats.tx_packets++;
	} else {
		if (packet && !packet->part_of_skb)
			kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;

	net_device = hv_get_drvdata(device_obj);
	rdev = net_device->extension;

	switch (indicate->status) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		rdev->link_state = false;
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		rdev->link_state = true;
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		rdev->link_change = true;
		break;
	default:
		return;
	}

	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);
	if (!rdev->link_state) {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
	}
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	/* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
	if (mtu < ETH_DATA_LEN || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels   = netvsc_get_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netif_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 * Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	if (notify)
		netdev_notify_peers(net);
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;
	u32 max_needed_headroom;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	max_needed_headroom = sizeof(struct hv_netvsc_packet) +
			      RNDIS_AND_PPI_SIZE;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/*
	 * Request additional head room in the skb.
	 * We will use this space to build the rndis
	 * header and other state we need to maintain.
	 */
	net->needed_headroom = max_needed_headroom;

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);