/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);

	return net_device;
}

static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.slots);

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}
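
/* Tell the host to stop using the receive and send buffers.  This only
 * revokes the host's reference; the GPADLs and the buffer memory itself
 * are torn down separately in netvsc_teardown_gpadl().
 */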
static void netvsc_revoke_buf(struct hv_device *device,
			      struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded,
		 * ignore it since we cannot send on a rescinded channel.
		 * This allows us to properly clean up even when the
		 * channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded,
		 * ignore it since we cannot send on a rescinded channel.
		 * This allows us to properly clean up even when the
		 * channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}
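
/* Tear down the GPADLs established for the receive and send buffers and
 * free the buffer memory.  Callers are expected to have already revoked
 * the buffers via netvsc_revoke_buf() so that the host no longer uses
 * them.
 */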
static void netvsc_teardown_gpadl(struct hv_device *device,
				  struct netvsc_device *net_device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}

	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}

	kfree(net_device->send_section_map);
}

int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);

	/* Prefer memory local to the channel's target CPU; fall back to
	 * any node if that fails.
	 */
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}
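
/* Allocate the receive and send buffers, establish GPADLs for them, and
 * hand them to the host (NVSP_MSG1_TYPE_SEND_RECV_BUF and
 * NVSP_MSG1_TYPE_SEND_SEND_BUF).  On failure, any partially constructed
 * state is unwound via netvsc_revoke_buf()/netvsc_teardown_gpadl().
 */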
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Setup receive completion ring */
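	/* One slot is needed for each receive section plus one spare:
	 * the ring keeps one slot unused to tell a full ring from an
	 * empty one (see recv_comp_slot_avail()), and the count is
	 * rounded up to a multiple of PAGE_SIZE / sizeof(u64).
	 */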
	net_device->recv_completion_cnt
		= round_up(net_device->recv_section_cnt + 1,
			   PAGE_SIZE / sizeof(u64));
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.send_send_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete send buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size =
		init_packet->msg.v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
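	/* send_section_map is a bitmap with one bit per send buffer
	 * section: netvsc_get_next_send_section() atomically sets a bit
	 * to claim a section and netvsc_free_send_slot() clears it when
	 * the transmit completes.
	 */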
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong),
					       GFP_KERNEL);
	if (!net_device->send_section_map) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_buf(device, net_device);
	netvsc_teardown_gpadl(device, net_device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
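
/* Negotiate the highest NVSP protocol version supported by both ends
 * (tried newest-first), then send the NDIS version and set up the
 * receive and send buffers.
 */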
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;	/* NDIS 6.1 */
	else
		ndis_version = 0x0006001e;	/* NDIS 6.30 */

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	cancel_work_sync(&net_device->subchan_work);

	netvsc_revoke_buf(device, net_device);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here.
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	netvsc_teardown_gpadl(device, net_device);

	/* And disassociate NAPI contexts from the device */
	for (i = 0; i < net_device->num_chn; i++)
		netif_napi_del(&net_device->chan_table[i].napi);

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}
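
/* Transmit flow control: netvsc_send_pkt() stops a queue when available
 * ring space drops below RING_AVAIL_PERCENT_LOWATER, and
 * netvsc_send_tx_complete() wakes it once space rises above
 * RING_AVAIL_PERCENT_HIWATER or no sends remain outstanding.  The gap
 * between the two thresholds keeps the queue from toggling rapidly.
 */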
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

static void netvsc_send_tx_complete(struct netvsc_device *net_device,
				    struct vmbus_channel *incoming_channel,
				    struct hv_device *device,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = device->channel;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
	     queue_sends < 1)) {
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
		ndev_ctx->eth_stats.wake_queue++;
	}
}

static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	struct net_device *ndev = hv_get_drvdata(device);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(net_device, incoming_channel,
					device, desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}
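
/* Copy the packet's pages (and any packets already batched in this
 * section) into the chosen send buffer section.  When the stack signals
 * xmit_more and the data is not a multiple of pkt_align, zero padding
 * is appended so that the next batched packet starts aligned.
 */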
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer *pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (skb->xmit_more && remain && !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet * const rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}
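
/* Batching: while the stack signals xmit_more, data packets accumulate
 * in one send buffer section (multi-send data, msd) and go out as a
 * single message.  A packet that does not fit whole may still have its
 * RNDIS header copied into the section (cp_partial) with the payload
 * pages sent by reference; control messages (no skb) bypass batching.
 */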
/* RCU already held by caller */
int netvsc_send(struct net_device_context *ndev_ctx,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (unlikely(!net_device->send_section_map))
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message directly without accessing the msd
	 * (Multi-Send Data) field, which may be changed during data
	 * packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
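
/* Receive completions are queued in a per-channel ring (multi_recv_comp)
 * and sent back to the host in batches: mrc.first is the next completion
 * to send, mrc.next is the next free slot to fill.
 */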
/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding and how many
 * slots are still available.
 */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hu, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}
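
/* Process one RNDIS data packet from the host: validate the transfer
 * page set, pass each range up through the RNDIS filter, then queue a
 * receive completion for the host.  Returns the number of ranges
 * processed, which counts against the NAPI budget.
 */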
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct net_device_context *net_device_ctx,
			  struct hv_device *device,
			  struct vmbus_channel *channel,
			  const struct vmpacket_descriptor *desc,
			  struct nvsp_message *nvsp)
{
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		void *data = recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(ndev, net_device, device,
					      channel, data, buflen);
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

static void netvsc_send_table(struct hv_device *hdev,
			      struct nvsp_message *nvmsg)
{
	struct net_device *ndev = hv_get_drvdata(hdev);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	int i;
	u32 count, *tab;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

static void netvsc_send_vf(struct net_device_context *net_device_ctx,
			   struct nvsp_message *nvmsg)
{
	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
					 struct net_device_context *net_device_ctx,
					 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(net_device_ctx, nvmsg);
		break;
	}
}

static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct vmbus_channel *channel,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct nvsp_message *nvmsg = hv_pkt_data(desc);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(net_device, channel, device,
				       desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, net_device_ctx,
				      device, channel, desc, nvmsg);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(device, net_device_ctx, nvmsg);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}
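
/* Map a channel to its hv_device: subchannels have no device_obj of
 * their own, so use the primary channel's.
 */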
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, channel, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* If send of pending receive completions succeeded
	 * and did not exhaust NAPI budget this time
	 * and not doing busy poll
	 * then re-enable host interrupts
	 * and reschedule if ring is not empty.
	 */
	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
	    work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    hv_end_read(&channel->inbound)) {
		hv_begin_read(&channel->inbound);
		napi_reschedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}

/* Call back when data is available in host ring buffer.
 * Processing is deferred until network softirq (NAPI).
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(rbi);

		__napi_schedule(&nvchan->napi);
	}
}
1276 */ 1277 1278 for (i = 0; i < VRSS_CHANNEL_MAX; i++) { 1279 struct netvsc_channel *nvchan = &net_device->chan_table[i]; 1280 1281 nvchan->channel = device->channel; 1282 nvchan->net_device = net_device; 1283 u64_stats_init(&nvchan->tx_stats.syncp); 1284 u64_stats_init(&nvchan->rx_stats.syncp); 1285 } 1286 1287 /* Enable NAPI handler before init callbacks */ 1288 netif_napi_add(ndev, &net_device->chan_table[0].napi, 1289 netvsc_poll, NAPI_POLL_WEIGHT); 1290 1291 /* Open the channel */ 1292 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, 1293 ring_size * PAGE_SIZE, NULL, 0, 1294 netvsc_channel_cb, 1295 net_device->chan_table); 1296 1297 if (ret != 0) { 1298 netif_napi_del(&net_device->chan_table[0].napi); 1299 netdev_err(ndev, "unable to open channel: %d\n", ret); 1300 goto cleanup; 1301 } 1302 1303 /* Channel is opened */ 1304 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n"); 1305 1306 napi_enable(&net_device->chan_table[0].napi); 1307 1308 /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is 1309 * populated. 1310 */ 1311 rcu_assign_pointer(net_device_ctx->nvdev, net_device); 1312 1313 /* Connect with the NetVsp */ 1314 ret = netvsc_connect_vsp(device, net_device, device_info); 1315 if (ret != 0) { 1316 netdev_err(ndev, 1317 "unable to connect to NetVSP - %d\n", ret); 1318 goto close; 1319 } 1320 1321 return net_device; 1322 1323 close: 1324 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); 1325 napi_disable(&net_device->chan_table[0].napi); 1326 1327 /* Now, we can close the channel safely */ 1328 vmbus_close(device->channel); 1329 1330 cleanup: 1331 free_netvsc_device(&net_device->rcu); 1332 1333 return ERR_PTR(ret); 1334 } 1335