1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2009, Microsoft Corporation.
4 *
5 * Authors:
6 * Haiyang Zhang <haiyangz@microsoft.com>
7 * Hank Janssen <hjanssen@microsoft.com>
8 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/kernel.h>
12 #include <linux/sched.h>
13 #include <linux/wait.h>
14 #include <linux/mm.h>
15 #include <linux/delay.h>
16 #include <linux/io.h>
17 #include <linux/slab.h>
18 #include <linux/netdevice.h>
19 #include <linux/if_ether.h>
20 #include <linux/vmalloc.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/prefetch.h>
23 #include <linux/filter.h>
24
25 #include <asm/sync_bitops.h>
26 #include <asm/mshyperv.h>
27
28 #include "hyperv_net.h"
29 #include "netvsc_trace.h"
30
31 /*
32 * Switch the data path from the synthetic interface to the VF
33 * interface.
34 */
35 int netvsc_switch_datapath(struct net_device *ndev, bool vf)
36 {
37 struct net_device_context *net_device_ctx = netdev_priv(ndev);
38 struct hv_device *dev = net_device_ctx->device_ctx;
39 struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
40 struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
41 int ret, retry = 0;
42
43 /* Block sending traffic to the VF if it is about to be removed */
44 if (!vf)
45 net_device_ctx->data_path_is_vf = vf;
46
47 memset(init_pkt, 0, sizeof(struct nvsp_message));
48 init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
49 if (vf)
50 init_pkt->msg.v4_msg.active_dp.active_datapath =
51 NVSP_DATAPATH_VF;
52 else
53 init_pkt->msg.v4_msg.active_dp.active_datapath =
54 NVSP_DATAPATH_SYNTHETIC;
55
56 again:
57 trace_nvsp_send(ndev, init_pkt);
58
59 ret = vmbus_sendpacket(dev->channel, init_pkt,
60 sizeof(struct nvsp_message),
61 (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
62 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
63
64 /* If we failed to switch to/from the VF, let data_path_is_vf stay false,
65 * so that the synthetic path is used to send data.
66 */
67 if (ret) {
68 if (ret != -EAGAIN) {
69 netdev_err(ndev,
70 "Unable to send sw datapath msg, err: %d\n",
71 ret);
72 return ret;
73 }
74
75 if (retry++ < RETRY_MAX) {
76 usleep_range(RETRY_US_LO, RETRY_US_HI);
77 goto again;
78 } else {
79 netdev_err(
80 ndev,
81 "Retry failed to send sw datapath msg, err: %d\n",
82 ret);
83 return ret;
84 }
85 }
86
87 wait_for_completion(&nv_dev->channel_init_wait);
88 net_device_ctx->data_path_is_vf = vf;
89
90 return 0;
91 }
92
93 /* Worker to set up sub-channels during initial setup.
94 * The initial hotplug event occurs in softirq context
95 * and cannot wait for the channels.
96 */
97 static void netvsc_subchan_work(struct work_struct *w)
98 {
99 struct netvsc_device *nvdev =
100 container_of(w, struct netvsc_device, subchan_work);
101 struct rndis_device *rdev;
102 int i, ret;
103
104 /* Avoid deadlock with device removal already under RTNL */
105 if (!rtnl_trylock()) {
106 schedule_work(w);
107 return;
108 }
109
110 rdev = nvdev->extension;
111 if (rdev) {
112 ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
113 if (ret == 0) {
114 netif_device_attach(rdev->ndev);
115 } else {
116 /* fallback to only primary channel */
117 for (i = 1; i < nvdev->num_chn; i++)
118 netif_napi_del(&nvdev->chan_table[i].napi);
119
120 nvdev->max_chn = 1;
121 nvdev->num_chn = 1;
122 }
123 }
124
125 rtnl_unlock();
126 }
127
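/* Allocate a netvsc_device and initialize the fields that must be valid
 * before the channel is opened: the drain waitqueue, the channel-init
 * completion, and the sub-channel work item.
 */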
128 static struct netvsc_device *alloc_net_device(void)
129 {
130 struct netvsc_device *net_device;
131
132 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
133 if (!net_device)
134 return NULL;
135
136 init_waitqueue_head(&net_device->wait_drain);
137 net_device->destroy = false;
138 net_device->tx_disable = true;
139
140 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
141 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
142
143 init_completion(&net_device->channel_init_wait);
144 init_waitqueue_head(&net_device->subchan_open);
145 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
146
147 return net_device;
148 }
149
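/* RCU callback that frees a netvsc_device. The receive/send buffers are
 * freed only if their GPADL memory is not still flagged decrypted (i.e.
 * possibly still visible to the host); in that case the memory is
 * deliberately leaked rather than handed back to the allocator.
 */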
150 static void free_netvsc_device(struct rcu_head *head)
151 {
152 struct netvsc_device *nvdev
153 = container_of(head, struct netvsc_device, rcu);
154 int i;
155
156 kfree(nvdev->extension);
157
158 if (!nvdev->recv_buf_gpadl_handle.decrypted)
159 vfree(nvdev->recv_buf);
160 if (!nvdev->send_buf_gpadl_handle.decrypted)
161 vfree(nvdev->send_buf);
162 bitmap_free(nvdev->send_section_map);
163
164 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
165 xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
166 kfree(nvdev->chan_table[i].recv_buf);
167 vfree(nvdev->chan_table[i].mrc.slots);
168 }
169
170 kfree(nvdev);
171 }
172
173 static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
174 {
175 call_rcu(&nvdev->rcu, free_netvsc_device);
176 }
177
178 static void netvsc_revoke_recv_buf(struct hv_device *device,
179 struct netvsc_device *net_device,
180 struct net_device *ndev)
181 {
182 struct nvsp_message *revoke_packet;
183 int ret;
184
185 /*
186 * If we got a section count, it means we received a
187 * SendReceiveBufferComplete msg (i.e. we sent a
188 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
189 * to send a revoke msg here.
190 */
191 if (net_device->recv_section_cnt) {
192 /* Send the revoke receive buffer */
193 revoke_packet = &net_device->revoke_packet;
194 memset(revoke_packet, 0, sizeof(struct nvsp_message));
195
196 revoke_packet->hdr.msg_type =
197 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
198 revoke_packet->msg.v1_msg.
199 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
200
201 trace_nvsp_send(ndev, revoke_packet);
202
203 ret = vmbus_sendpacket(device->channel,
204 revoke_packet,
205 sizeof(struct nvsp_message),
206 VMBUS_RQST_ID_NO_RESPONSE,
207 VM_PKT_DATA_INBAND, 0);
208 /* If the failure is because the channel is rescinded,
209 * ignore the failure since we cannot send on a rescinded
210 * channel. This allows us to clean up properly
211 * even when the channel is rescinded.
212 */
213 if (device->channel->rescind)
214 ret = 0;
215 /*
216 * If we failed here, we might as well return and
217 * have a leak rather than continue and risk a bugcheck.
218 */
219 if (ret != 0) {
220 netdev_err(ndev, "unable to send "
221 "revoke receive buffer to netvsp\n");
222 return;
223 }
224 net_device->recv_section_cnt = 0;
225 }
226 }
227
228 static void netvsc_revoke_send_buf(struct hv_device *device,
229 struct netvsc_device *net_device,
230 struct net_device *ndev)
231 {
232 struct nvsp_message *revoke_packet;
233 int ret;
234
235 /* Deal with the send buffer we may have set up.
236 * If we got a send section size, it means we received a
237 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
238 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
239 * to send a revoke msg here.
240 */
241 if (net_device->send_section_cnt) {
242 /* Send the revoke send buffer */
243 revoke_packet = &net_device->revoke_packet;
244 memset(revoke_packet, 0, sizeof(struct nvsp_message));
245
246 revoke_packet->hdr.msg_type =
247 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
248 revoke_packet->msg.v1_msg.revoke_send_buf.id =
249 NETVSC_SEND_BUFFER_ID;
250
251 trace_nvsp_send(ndev, revoke_packet);
252
253 ret = vmbus_sendpacket(device->channel,
254 revoke_packet,
255 sizeof(struct nvsp_message),
256 VMBUS_RQST_ID_NO_RESPONSE,
257 VM_PKT_DATA_INBAND, 0);
258
259 /* If the failure is because the channel is rescinded,
260 * ignore the failure since we cannot send on a rescinded
261 * channel. This allows us to clean up properly
262 * even when the channel is rescinded.
263 */
264 if (device->channel->rescind)
265 ret = 0;
266
267 /* If we failed here, we might as well return and
268 * have a leak rather than continue and risk a bugcheck.
269 */
270 if (ret != 0) {
271 netdev_err(ndev, "unable to send "
272 "revoke send buffer to netvsp\n");
273 return;
274 }
275 net_device->send_section_cnt = 0;
276 }
277 }
278
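/* Tear down the GPADL for the receive buffer, if one was established.
 * A failure is logged and the buffer is left in place (see the comment
 * below on preferring a leak over a bugcheck).
 */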
279 static void netvsc_teardown_recv_gpadl(struct hv_device *device,
280 struct netvsc_device *net_device,
281 struct net_device *ndev)
282 {
283 int ret;
284
285 if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
286 ret = vmbus_teardown_gpadl(device->channel,
287 &net_device->recv_buf_gpadl_handle);
288
289 /* If we failed here, we might as well return and have a leak
290 * rather than continue and risk a bugcheck.
291 */
292 if (ret != 0) {
293 netdev_err(ndev,
294 "unable to teardown receive buffer's gpadl\n");
295 return;
296 }
297 }
298 }
299
300 static void netvsc_teardown_send_gpadl(struct hv_device *device,
301 struct netvsc_device *net_device,
302 struct net_device *ndev)
303 {
304 int ret;
305
306 if (net_device->send_buf_gpadl_handle.gpadl_handle) {
307 ret = vmbus_teardown_gpadl(device->channel,
308 &net_device->send_buf_gpadl_handle);
309
310 /* If we failed here, we might as well return and have a leak
311 * rather than continue and risk a bugcheck.
312 */
313 if (ret != 0) {
314 netdev_err(ndev,
315 "unable to teardown send buffer's gpadl\n");
316 return;
317 }
318 }
319 }
320
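/* Allocate the receive-completion ring for a queue. Allocation is first
 * attempted on the NUMA node of the channel's target CPU and falls back
 * to any node if that fails.
 */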
321 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
322 {
323 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
324 int node = cpu_to_node(nvchan->channel->target_cpu);
325 size_t size;
326
327 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
328 nvchan->mrc.slots = vzalloc_node(size, node);
329 if (!nvchan->mrc.slots)
330 nvchan->mrc.slots = vzalloc(size);
331
332 return nvchan->mrc.slots ? 0 : -ENOMEM;
333 }
334
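/* Allocate the receive and send buffers, establish their GPADLs and
 * register them with the host (NVSP_MSG1_TYPE_SEND_RECV_BUF /
 * NVSP_MSG1_TYPE_SEND_SEND_BUF), then validate the section layout the
 * host reports before allocating the send-section bitmap.
 */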
335 static int netvsc_init_buf(struct hv_device *device,
336 struct netvsc_device *net_device,
337 const struct netvsc_device_info *device_info)
338 {
339 struct nvsp_1_message_send_receive_buffer_complete *resp;
340 struct net_device *ndev = hv_get_drvdata(device);
341 struct nvsp_message *init_packet;
342 unsigned int buf_size;
343 int i, ret = 0;
344
345 /* Get receive buffer area. */
346 buf_size = device_info->recv_sections * device_info->recv_section_size;
347 buf_size = roundup(buf_size, PAGE_SIZE);
348
349 /* Legacy hosts only allow a smaller receive buffer */
350 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
351 buf_size = min_t(unsigned int, buf_size,
352 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
353
354 net_device->recv_buf = vzalloc(buf_size);
355 if (!net_device->recv_buf) {
356 netdev_err(ndev,
357 "unable to allocate receive buffer of size %u\n",
358 buf_size);
359 ret = -ENOMEM;
360 goto cleanup;
361 }
362
363 net_device->recv_buf_size = buf_size;
364
365 /*
366 * Establish the gpadl handle for this buffer on this
367 * channel. Note: This call uses the vmbus connection rather
368 * than the channel to establish the gpadl handle.
369 */
370 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
371 buf_size,
372 &net_device->recv_buf_gpadl_handle);
373 if (ret != 0) {
374 netdev_err(ndev,
375 "unable to establish receive buffer's gpadl\n");
376 goto cleanup;
377 }
378
379 /* Notify the NetVsp of the gpadl handle */
380 init_packet = &net_device->channel_init_pkt;
381 memset(init_packet, 0, sizeof(struct nvsp_message));
382 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
383 init_packet->msg.v1_msg.send_recv_buf.
384 gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
385 init_packet->msg.v1_msg.
386 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
387
388 trace_nvsp_send(ndev, init_packet);
389
390 /* Send the gpadl notification request */
391 ret = vmbus_sendpacket(device->channel, init_packet,
392 sizeof(struct nvsp_message),
393 (unsigned long)init_packet,
394 VM_PKT_DATA_INBAND,
395 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
396 if (ret != 0) {
397 netdev_err(ndev,
398 "unable to send receive buffer's gpadl to netvsp\n");
399 goto cleanup;
400 }
401
402 wait_for_completion(&net_device->channel_init_wait);
403
404 /* Check the response */
405 resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
406 if (resp->status != NVSP_STAT_SUCCESS) {
407 netdev_err(ndev,
408 "Unable to complete receive buffer initialization with NetVsp - status %d\n",
409 resp->status);
410 ret = -EINVAL;
411 goto cleanup;
412 }
413
414 /* Parse the response */
415 netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
416 resp->num_sections, resp->sections[0].sub_alloc_size,
417 resp->sections[0].num_sub_allocs);
418
419 /* There should only be one section for the entire receive buffer */
420 if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
421 ret = -EINVAL;
422 goto cleanup;
423 }
424
425 net_device->recv_section_size = resp->sections[0].sub_alloc_size;
426 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
427
428 /* Ensure buffer will not overflow */
429 if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
430 (u64)net_device->recv_section_cnt > (u64)buf_size) {
431 netdev_err(ndev, "invalid recv_section_size %u\n",
432 net_device->recv_section_size);
433 ret = -EINVAL;
434 goto cleanup;
435 }
436
437 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
438 struct netvsc_channel *nvchan = &net_device->chan_table[i];
439
440 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
441 if (nvchan->recv_buf == NULL) {
442 ret = -ENOMEM;
443 goto cleanup;
444 }
445 }
446
447 /* Set up the receive completion ring.
448 * Add 1 to the recv_section_cnt because at least one entry in a
449 * ring buffer has to be empty.
450 */
451 net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
452 ret = netvsc_alloc_recv_comp_ring(net_device, 0);
453 if (ret)
454 goto cleanup;
455
456 /* Now set up the send buffer. */
457 buf_size = device_info->send_sections * device_info->send_section_size;
458 buf_size = round_up(buf_size, PAGE_SIZE);
459
460 net_device->send_buf = vzalloc(buf_size);
461 if (!net_device->send_buf) {
462 netdev_err(ndev, "unable to allocate send buffer of size %u\n",
463 buf_size);
464 ret = -ENOMEM;
465 goto cleanup;
466 }
467 net_device->send_buf_size = buf_size;
468
469 /* Establish the gpadl handle for this buffer on this
470 * channel. Note: This call uses the vmbus connection rather
471 * than the channel to establish the gpadl handle.
472 */
473 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
474 buf_size,
475 &net_device->send_buf_gpadl_handle);
476 if (ret != 0) {
477 netdev_err(ndev,
478 "unable to establish send buffer's gpadl\n");
479 goto cleanup;
480 }
481
482 /* Notify the NetVsp of the gpadl handle */
483 init_packet = &net_device->channel_init_pkt;
484 memset(init_packet, 0, sizeof(struct nvsp_message));
485 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
486 init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
487 net_device->send_buf_gpadl_handle.gpadl_handle;
488 init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
489
490 trace_nvsp_send(ndev, init_packet);
491
492 /* Send the gpadl notification request */
493 ret = vmbus_sendpacket(device->channel, init_packet,
494 sizeof(struct nvsp_message),
495 (unsigned long)init_packet,
496 VM_PKT_DATA_INBAND,
497 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
498 if (ret != 0) {
499 netdev_err(ndev,
500 "unable to send send buffer's gpadl to netvsp\n");
501 goto cleanup;
502 }
503
504 wait_for_completion(&net_device->channel_init_wait);
505
506 /* Check the response */
507 if (init_packet->msg.v1_msg.
508 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
509 netdev_err(ndev, "Unable to complete send buffer "
510 "initialization with NetVsp - status %d\n",
511 init_packet->msg.v1_msg.
512 send_send_buf_complete.status);
513 ret = -EINVAL;
514 goto cleanup;
515 }
516
517 /* Parse the response */
518 net_device->send_section_size = init_packet->msg.
519 v1_msg.send_send_buf_complete.section_size;
520 if (net_device->send_section_size < NETVSC_MTU_MIN) {
521 netdev_err(ndev, "invalid send_section_size %u\n",
522 net_device->send_section_size);
523 ret = -EINVAL;
524 goto cleanup;
525 }
526
527 /* Section count is simply the size divided by the section size. */
528 net_device->send_section_cnt = buf_size / net_device->send_section_size;
529
530 netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
531 net_device->send_section_size, net_device->send_section_cnt);
532
533 /* Set up state for managing the send buffer. */
534 net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
535 GFP_KERNEL);
536 if (!net_device->send_section_map) {
537 ret = -ENOMEM;
538 goto cleanup;
539 }
540
541 goto exit;
542
543 cleanup:
544 netvsc_revoke_recv_buf(device, net_device, ndev);
545 netvsc_revoke_send_buf(device, net_device, ndev);
546 netvsc_teardown_recv_gpadl(device, net_device, ndev);
547 netvsc_teardown_send_gpadl(device, net_device, ndev);
548
549 exit:
550 return ret;
551 }
552
553 /* Negotiate NVSP protocol version */
554 static int negotiate_nvsp_ver(struct hv_device *device,
555 struct netvsc_device *net_device,
556 struct nvsp_message *init_packet,
557 u32 nvsp_ver)
558 {
559 struct net_device *ndev = hv_get_drvdata(device);
560 int ret;
561
562 memset(init_packet, 0, sizeof(struct nvsp_message));
563 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
564 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
565 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
566 trace_nvsp_send(ndev, init_packet);
567
568 /* Send the init request */
569 ret = vmbus_sendpacket(device->channel, init_packet,
570 sizeof(struct nvsp_message),
571 (unsigned long)init_packet,
572 VM_PKT_DATA_INBAND,
573 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
574
575 if (ret != 0)
576 return ret;
577
578 wait_for_completion(&net_device->channel_init_wait);
579
580 if (init_packet->msg.init_msg.init_complete.status !=
581 NVSP_STAT_SUCCESS)
582 return -EINVAL;
583
584 if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
585 return 0;
586
587 /* NVSPv2 or later: Send NDIS config */
588 memset(init_packet, 0, sizeof(struct nvsp_message));
589 init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
590 init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
591 init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
592
593 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
594 if (hv_is_isolation_supported())
595 netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
596 else
597 init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
598
599 /* Teaming bit is needed to receive link speed updates */
600 init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
601 }
602
603 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
604 init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
605
606 trace_nvsp_send(ndev, init_packet);
607
608 ret = vmbus_sendpacket(device->channel, init_packet,
609 sizeof(struct nvsp_message),
610 VMBUS_RQST_ID_NO_RESPONSE,
611 VM_PKT_DATA_INBAND, 0);
612
613 return ret;
614 }
615
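/* Negotiate the NVSP protocol version with the host (newest first),
 * report the NDIS version we intend to use, and then set up the
 * receive/send buffers via netvsc_init_buf().
 */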
616 static int netvsc_connect_vsp(struct hv_device *device,
617 struct netvsc_device *net_device,
618 const struct netvsc_device_info *device_info)
619 {
620 struct net_device *ndev = hv_get_drvdata(device);
621 static const u32 ver_list[] = {
622 NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
623 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
624 NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
625 };
626 struct nvsp_message *init_packet;
627 int ndis_version, i, ret;
628
629 init_packet = &net_device->channel_init_pkt;
630
631 /* Negotiate the latest NVSP protocol supported */
632 for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
633 if (negotiate_nvsp_ver(device, net_device, init_packet,
634 ver_list[i]) == 0) {
635 net_device->nvsp_version = ver_list[i];
636 break;
637 }
638
639 if (i < 0) {
640 ret = -EPROTO;
641 goto cleanup;
642 }
643
644 if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
645 netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
646 net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
647 ret = -EPROTO;
648 goto cleanup;
649 }
650
651 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
652
653 /* Send the ndis version */
654 memset(init_packet, 0, sizeof(struct nvsp_message));
655
656 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
657 ndis_version = 0x00060001;
658 else
659 ndis_version = 0x0006001e;
660
661 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
662 init_packet->msg.v1_msg.
663 send_ndis_ver.ndis_major_ver =
664 (ndis_version & 0xFFFF0000) >> 16;
665 init_packet->msg.v1_msg.
666 send_ndis_ver.ndis_minor_ver =
667 ndis_version & 0xFFFF;
668
669 trace_nvsp_send(ndev, init_packet);
670
671 /* Send the init request */
672 ret = vmbus_sendpacket(device->channel, init_packet,
673 sizeof(struct nvsp_message),
674 VMBUS_RQST_ID_NO_RESPONSE,
675 VM_PKT_DATA_INBAND, 0);
676 if (ret != 0)
677 goto cleanup;
678
679
680 ret = netvsc_init_buf(device, net_device, device_info);
681
682 cleanup:
683 return ret;
684 }
685
686 /*
687 * netvsc_device_remove - Callback when the root bus device is removed
688 */
689 void netvsc_device_remove(struct hv_device *device)
690 {
691 struct net_device *ndev = hv_get_drvdata(device);
692 struct net_device_context *net_device_ctx = netdev_priv(ndev);
693 struct netvsc_device *net_device
694 = rtnl_dereference(net_device_ctx->nvdev);
695 int i;
696
697 /*
698 * Revoke receive buffer. If host is pre-Win2016 then tear down
699 * receive buffer GPADL. Do the same for send buffer.
700 */
701 netvsc_revoke_recv_buf(device, net_device, ndev);
702 if (vmbus_proto_version < VERSION_WIN10)
703 netvsc_teardown_recv_gpadl(device, net_device, ndev);
704
705 netvsc_revoke_send_buf(device, net_device, ndev);
706 if (vmbus_proto_version < VERSION_WIN10)
707 netvsc_teardown_send_gpadl(device, net_device, ndev);
708
709 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
710
711 /* Disable NAPI and disassociate its context from the device. */
712 for (i = 0; i < net_device->num_chn; i++) {
713 /* See also vmbus_reset_channel_cb(). */
714 /* only disable enabled NAPI channel */
715 if (i < ndev->real_num_rx_queues)
716 napi_disable(&net_device->chan_table[i].napi);
717
718 netif_napi_del(&net_device->chan_table[i].napi);
719 }
720
721 /*
722 * At this point, no one should be accessing net_device
723 * except in here
724 */
725 netdev_dbg(ndev, "net device safe to remove\n");
726
727 /* Now, we can close the channel safely */
728 vmbus_close(device->channel);
729
730 /*
731 * If host is Win2016 or higher then we do the GPADL tear down
732 * here after VMBus is closed.
733 */
734 if (vmbus_proto_version >= VERSION_WIN10) {
735 netvsc_teardown_recv_gpadl(device, net_device, ndev);
736 netvsc_teardown_send_gpadl(device, net_device, ndev);
737 }
738
739 /* Release all resources */
740 free_netvsc_device_rcu(net_device);
741 }
742
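/* Watermarks for transmit flow control, as a percentage of free space in
 * the outbound ring buffer: the queue is stopped when free space falls
 * below RING_AVAIL_PERCENT_LOWATER and is woken again once it rises above
 * RING_AVAIL_PERCENT_HIWATER (or the queue has fully drained).
 */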
743 #define RING_AVAIL_PERCENT_HIWATER 20
744 #define RING_AVAIL_PERCENT_LOWATER 10
745
746 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
747 u32 index)
748 {
749 sync_change_bit(index, net_device->send_section_map);
750 }
751
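/* Completion handler for an RNDIS data packet: release the send-buffer
 * section (if one was used), update per-queue transmit statistics, undo
 * any DMA mappings made for isolated VMs, free the skb, and wake the
 * transmit queue if enough ring space has become available.
 */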
752 static void netvsc_send_tx_complete(struct net_device *ndev,
753 struct netvsc_device *net_device,
754 struct vmbus_channel *channel,
755 const struct vmpacket_descriptor *desc,
756 int budget)
757 {
758 struct net_device_context *ndev_ctx = netdev_priv(ndev);
759 struct sk_buff *skb;
760 u16 q_idx = 0;
761 int queue_sends;
762 u64 cmd_rqst;
763
764 cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
765 if (cmd_rqst == VMBUS_RQST_ERROR) {
766 netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
767 return;
768 }
769
770 skb = (struct sk_buff *)(unsigned long)cmd_rqst;
771
772 /* Notify the layer above us */
773 if (likely(skb)) {
774 struct hv_netvsc_packet *packet
775 = (struct hv_netvsc_packet *)skb->cb;
776 u32 send_index = packet->send_buf_index;
777 struct netvsc_stats_tx *tx_stats;
778
779 if (send_index != NETVSC_INVALID_INDEX)
780 netvsc_free_send_slot(net_device, send_index);
781 q_idx = packet->q_idx;
782
783 tx_stats = &net_device->chan_table[q_idx].tx_stats;
784
785 u64_stats_update_begin(&tx_stats->syncp);
786 tx_stats->packets += packet->total_packets;
787 tx_stats->bytes += packet->total_bytes;
788 u64_stats_update_end(&tx_stats->syncp);
789
790 netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
791 napi_consume_skb(skb, budget);
792 }
793
794 queue_sends =
795 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
796
797 if (unlikely(net_device->destroy)) {
798 if (queue_sends == 0)
799 wake_up(&net_device->wait_drain);
800 } else {
801 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
802
803 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
804 (hv_get_avail_to_write_percent(&channel->outbound) >
805 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
806 netif_tx_wake_queue(txq);
807 ndev_ctx->eth_stats.wake_queue++;
808 }
809 }
810 }
811
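/* Dispatch a VM_PKT_COMP packet from the host. Completions without a data
 * payload are matched back to the original request (e.g. a
 * SWITCH_DATA_PATH message); NVSP completion messages are length-checked,
 * copied into channel_init_pkt where a waiter expects them, or handed to
 * netvsc_send_tx_complete() for RNDIS packet completions.
 */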
812 static void netvsc_send_completion(struct net_device *ndev,
813 struct netvsc_device *net_device,
814 struct vmbus_channel *incoming_channel,
815 const struct vmpacket_descriptor *desc,
816 int budget)
817 {
818 const struct nvsp_message *nvsp_packet;
819 u32 msglen = hv_pkt_datalen(desc);
820 struct nvsp_message *pkt_rqst;
821 u64 cmd_rqst;
822 u32 status;
823
824 /* First check if this is a VMBUS completion without data payload */
825 if (!msglen) {
826 cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
827 desc->trans_id);
828 if (cmd_rqst == VMBUS_RQST_ERROR) {
829 netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
830 return;
831 }
832
833 pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
834 switch (pkt_rqst->hdr.msg_type) {
835 case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
836 complete(&net_device->channel_init_wait);
837 break;
838
839 default:
840 netdev_err(ndev, "Unexpected VMBUS completion!!\n");
841 }
842 return;
843 }
844
845 /* Ensure packet is big enough to read header fields */
846 if (msglen < sizeof(struct nvsp_message_header)) {
847 netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
848 return;
849 }
850
851 nvsp_packet = hv_pkt_data(desc);
852 switch (nvsp_packet->hdr.msg_type) {
853 case NVSP_MSG_TYPE_INIT_COMPLETE:
854 if (msglen < sizeof(struct nvsp_message_header) +
855 sizeof(struct nvsp_message_init_complete)) {
856 netdev_err(ndev, "nvsp_msg length too small: %u\n",
857 msglen);
858 return;
859 }
860 fallthrough;
861
862 case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
863 if (msglen < sizeof(struct nvsp_message_header) +
864 sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
865 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
866 msglen);
867 return;
868 }
869 fallthrough;
870
871 case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
872 if (msglen < sizeof(struct nvsp_message_header) +
873 sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
874 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
875 msglen);
876 return;
877 }
878 fallthrough;
879
880 case NVSP_MSG5_TYPE_SUBCHANNEL:
881 if (msglen < sizeof(struct nvsp_message_header) +
882 sizeof(struct nvsp_5_subchannel_complete)) {
883 netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
884 msglen);
885 return;
886 }
887 /* Copy the response back */
888 memcpy(&net_device->channel_init_pkt, nvsp_packet,
889 sizeof(struct nvsp_message));
890 complete(&net_device->channel_init_wait);
891 break;
892
893 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
894 if (msglen < sizeof(struct nvsp_message_header) +
895 sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
896 if (net_ratelimit())
897 netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
898 msglen);
899 return;
900 }
901
902 /* If status indicates an error, output a message so we know
903 * there's a problem. But process the completion anyway so the
904 * resources are released.
905 */
906 status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
907 if (status != NVSP_STAT_SUCCESS && net_ratelimit())
908 netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
909 status);
910
911 netvsc_send_tx_complete(ndev, net_device, incoming_channel,
912 desc, budget);
913 break;
914
915 default:
916 netdev_err(ndev,
917 "Unknown send completion type %d received!!\n",
918 nvsp_packet->hdr.msg_type);
919 }
920 }
921
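/* Find and atomically claim a free section of the send buffer. Returns
 * NETVSC_INVALID_INDEX if every section is currently in use.
 */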
922 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
923 {
924 unsigned long *map_addr = net_device->send_section_map;
925 unsigned int i;
926
927 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
928 if (sync_test_and_set_bit(i, map_addr) == 0)
929 return i;
930 }
931
932 return NETVSC_INVALID_INDEX;
933 }
934
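/* Copy the packet (or only its RNDIS descriptor when cp_partial is set)
 * into the chosen send-buffer section at offset pend_size, padding the
 * data to the packet alignment when more packets are expected so the
 * next copy starts on an aligned boundary.
 */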
935 static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
936 unsigned int section_index,
937 u32 pend_size,
938 struct hv_netvsc_packet *packet,
939 struct rndis_message *rndis_msg,
940 struct hv_page_buffer *pb,
941 bool xmit_more)
942 {
943 char *start = net_device->send_buf;
944 char *dest = start + (section_index * net_device->send_section_size)
945 + pend_size;
946 int i;
947 u32 padding = 0;
948 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
949 packet->page_buf_cnt;
950 u32 remain;
951
952 /* Add padding */
953 remain = packet->total_data_buflen & (net_device->pkt_align - 1);
954 if (xmit_more && remain) {
955 padding = net_device->pkt_align - remain;
956 rndis_msg->msg_len += padding;
957 packet->total_data_buflen += padding;
958 }
959
960 for (i = 0; i < page_count; i++) {
961 char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
962 u32 offset = pb[i].offset;
963 u32 len = pb[i].len;
964
965 memcpy(dest, (src + offset), len);
966 dest += len;
967 }
968
969 if (padding)
970 memset(dest, 0, padding);
971 }
972
973 void netvsc_dma_unmap(struct hv_device *hv_dev,
974 struct hv_netvsc_packet *packet)
975 {
976 int i;
977
978 if (!hv_is_isolation_supported())
979 return;
980
981 if (!packet->dma_range)
982 return;
983
984 for (i = 0; i < packet->page_buf_cnt; i++)
985 dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
986 packet->dma_range[i].mapping_size,
987 DMA_TO_DEVICE);
988
989 kfree(packet->dma_range);
990 }
991
992 /* netvsc_dma_map - Map swiotlb bounce buffer with data page of
993 * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
994 * VM.
995 *
996 * In isolation VM, netvsc send buffer has been marked visible to
997 * host and so the data copied to send buffer doesn't need to use
998 * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
999 * may not be copied to send buffer and so these pages need to be
1000 * mapped with the swiotlb bounce buffer. netvsc_dma_map() does
1001 * that. The pfns in the struct hv_page_buffer need to be converted
1002 * to bounce buffer's pfn. The loop here is necessary because the
1003 * entries in the page buffer array are not necessarily full
1004 * pages of data. Each entry in the array has a separate offset and
1005 * len that may be non-zero, even for entries in the middle of the
1006 * array. And the entries are not physically contiguous. So each
1007 * entry must be individually mapped rather than as a contiguous unit.
1008 * So dma_map_sg() is not used here.
1009 */
1010 static int netvsc_dma_map(struct hv_device *hv_dev,
1011 struct hv_netvsc_packet *packet,
1012 struct hv_page_buffer *pb)
1013 {
1014 u32 page_count = packet->page_buf_cnt;
1015 dma_addr_t dma;
1016 int i;
1017
1018 if (!hv_is_isolation_supported())
1019 return 0;
1020
1021 packet->dma_range = kcalloc(page_count,
1022 sizeof(*packet->dma_range),
1023 GFP_ATOMIC);
1024 if (!packet->dma_range)
1025 return -ENOMEM;
1026
1027 for (i = 0; i < page_count; i++) {
1028 char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
1029 + pb[i].offset);
1030 u32 len = pb[i].len;
1031
1032 dma = dma_map_single(&hv_dev->device, src, len,
1033 DMA_TO_DEVICE);
1034 if (dma_mapping_error(&hv_dev->device, dma)) {
1035 kfree(packet->dma_range);
1036 return -ENOMEM;
1037 }
1038
1039 /* pb[].offset and pb[].len are not changed during dma mapping
1040 * and so are not reassigned here.
1041 */
1042 packet->dma_range[i].dma = dma;
1043 packet->dma_range[i].mapping_size = len;
1044 pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
1045 }
1046
1047 return 0;
1048 }
1049
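/* Build the NVSP_MSG1_TYPE_SEND_RNDIS_PKT message and hand one packet to
 * the VMBus channel, either referencing external page buffers or, when
 * everything was copied into the send buffer, as a plain inband packet.
 * Also applies the ring-buffer flow control described above.
 */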
1050 static inline int netvsc_send_pkt(
1051 struct hv_device *device,
1052 struct hv_netvsc_packet *packet,
1053 struct netvsc_device *net_device,
1054 struct hv_page_buffer *pb,
1055 struct sk_buff *skb)
1056 {
1057 struct nvsp_message nvmsg;
1058 struct nvsp_1_message_send_rndis_packet *rpkt =
1059 &nvmsg.msg.v1_msg.send_rndis_pkt;
1060 struct netvsc_channel * const nvchan =
1061 &net_device->chan_table[packet->q_idx];
1062 struct vmbus_channel *out_channel = nvchan->channel;
1063 struct net_device *ndev = hv_get_drvdata(device);
1064 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1065 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
1066 u64 req_id;
1067 int ret;
1068 u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
1069
1070 memset(&nvmsg, 0, sizeof(struct nvsp_message));
1071 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
1072 if (skb)
1073 rpkt->channel_type = 0; /* 0 is RMC_DATA */
1074 else
1075 rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
1076
1077 rpkt->send_buf_section_index = packet->send_buf_index;
1078 if (packet->send_buf_index == NETVSC_INVALID_INDEX)
1079 rpkt->send_buf_section_size = 0;
1080 else
1081 rpkt->send_buf_section_size = packet->total_data_buflen;
1082
1083 req_id = (ulong)skb;
1084
1085 if (out_channel->rescind)
1086 return -ENODEV;
1087
1088 trace_nvsp_send_pkt(ndev, out_channel, rpkt);
1089
1090 packet->dma_range = NULL;
1091 if (packet->page_buf_cnt) {
1092 if (packet->cp_partial)
1093 pb += packet->rmsg_pgcnt;
1094
1095 ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
1096 if (ret) {
1097 ret = -EAGAIN;
1098 goto exit;
1099 }
1100
1101 ret = vmbus_sendpacket_pagebuffer(out_channel,
1102 pb, packet->page_buf_cnt,
1103 &nvmsg, sizeof(nvmsg),
1104 req_id);
1105
1106 if (ret)
1107 netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
1108 } else {
1109 ret = vmbus_sendpacket(out_channel,
1110 &nvmsg, sizeof(nvmsg),
1111 req_id, VM_PKT_DATA_INBAND,
1112 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1113 }
1114
1115 exit:
1116 if (ret == 0) {
1117 atomic_inc_return(&nvchan->queue_sends);
1118
1119 if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
1120 netif_tx_stop_queue(txq);
1121 ndev_ctx->eth_stats.stop_queue++;
1122 }
1123 } else if (ret == -EAGAIN) {
1124 netif_tx_stop_queue(txq);
1125 ndev_ctx->eth_stats.stop_queue++;
1126 } else {
1127 netdev_err(ndev,
1128 "Unable to send packet pages %u len %u, ret %d\n",
1129 packet->page_buf_cnt, packet->total_data_buflen,
1130 ret);
1131 }
1132
1133 if (netif_tx_queue_stopped(txq) &&
1134 atomic_read(&nvchan->queue_sends) < 1 &&
1135 !net_device->tx_disable) {
1136 netif_tx_wake_queue(txq);
1137 ndev_ctx->eth_stats.wake_queue++;
1138 if (ret == -EAGAIN)
1139 ret = -ENOSPC;
1140 }
1141
1142 return ret;
1143 }
1144
1145 /* Move packet out of multi send data (msd), and clear msd */
1146 static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
1147 struct sk_buff **msd_skb,
1148 struct multi_send_data *msdp)
1149 {
1150 *msd_skb = msdp->skb;
1151 *msd_send = msdp->pkt;
1152 msdp->skb = NULL;
1153 msdp->pkt = NULL;
1154 msdp->count = 0;
1155 }
1156
1157 /* RCU already held by caller */
1158 /* Batching/bouncing logic is designed to attempt to optimize
1159 * performance.
1160 *
1161 * For small, non-LSO packets we copy the packet to a send buffer
1162 * which is pre-registered with the Hyper-V side. This enables the
1163 * hypervisor to avoid remapping the aperture to access the packet
1164 * descriptor and data.
1165 *
1166 * If we already started using a buffer and the netdev is transmitting
1167 * a burst of packets, keep on copying into the buffer until it is
1168 * full or we are done collecting a burst. If there is an existing
1169 * buffer with space for the RNDIS descriptor but not the packet, copy
1170 * the RNDIS descriptor to the buffer, keeping the packet in place.
1171 *
1172 * If we do batching and send more than one packet using a single
1173 * NetVSC message, free the SKBs of the packets copied, except for the
1174 * last packet. This is done to streamline the handling of the case
1175 * where the last packet only had the RNDIS descriptor copied to the
1176 * send buffer, with the data pointers included in the NetVSC message.
1177 */
1178 int netvsc_send(struct net_device *ndev,
1179 struct hv_netvsc_packet *packet,
1180 struct rndis_message *rndis_msg,
1181 struct hv_page_buffer *pb,
1182 struct sk_buff *skb,
1183 bool xdp_tx)
1184 {
1185 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1186 struct netvsc_device *net_device
1187 = rcu_dereference_bh(ndev_ctx->nvdev);
1188 struct hv_device *device = ndev_ctx->device_ctx;
1189 int ret = 0;
1190 struct netvsc_channel *nvchan;
1191 u32 pktlen = packet->total_data_buflen, msd_len = 0;
1192 unsigned int section_index = NETVSC_INVALID_INDEX;
1193 struct multi_send_data *msdp;
1194 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
1195 struct sk_buff *msd_skb = NULL;
1196 bool try_batch, xmit_more;
1197
1198 /* If device is rescinded, return error and packet will get dropped. */
1199 if (unlikely(!net_device || net_device->destroy))
1200 return -ENODEV;
1201
1202 nvchan = &net_device->chan_table[packet->q_idx];
1203 packet->send_buf_index = NETVSC_INVALID_INDEX;
1204 packet->cp_partial = false;
1205
1206 /* Send a control message or XDP packet directly without accessing
1207 * msd (Multi-Send Data) field which may be changed during data packet
1208 * processing.
1209 */
1210 if (!skb || xdp_tx)
1211 return netvsc_send_pkt(device, packet, net_device, pb, skb);
1212
1213 /* batch packets in send buffer if possible */
1214 msdp = &nvchan->msd;
1215 if (msdp->pkt)
1216 msd_len = msdp->pkt->total_data_buflen;
1217
1218 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
1219 if (try_batch && msd_len + pktlen + net_device->pkt_align <
1220 net_device->send_section_size) {
1221 section_index = msdp->pkt->send_buf_index;
1222
1223 } else if (try_batch && msd_len + packet->rmsg_size <
1224 net_device->send_section_size) {
1225 section_index = msdp->pkt->send_buf_index;
1226 packet->cp_partial = true;
1227
1228 } else if (pktlen + net_device->pkt_align <
1229 net_device->send_section_size) {
1230 section_index = netvsc_get_next_send_section(net_device);
1231 if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
1232 ++ndev_ctx->eth_stats.tx_send_full;
1233 } else {
1234 move_pkt_msd(&msd_send, &msd_skb, msdp);
1235 msd_len = 0;
1236 }
1237 }
1238
1239 /* Keep aggregating only if stack says more data is coming
1240 * and we are not doing a mixed-mode send and are not flow blocked
1241 */
1242 xmit_more = netdev_xmit_more() &&
1243 !packet->cp_partial &&
1244 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1245
1246 if (section_index != NETVSC_INVALID_INDEX) {
1247 netvsc_copy_to_send_buf(net_device,
1248 section_index, msd_len,
1249 packet, rndis_msg, pb, xmit_more);
1250
1251 packet->send_buf_index = section_index;
1252
1253 if (packet->cp_partial) {
1254 packet->page_buf_cnt -= packet->rmsg_pgcnt;
1255 packet->total_data_buflen = msd_len + packet->rmsg_size;
1256 } else {
1257 packet->page_buf_cnt = 0;
1258 packet->total_data_buflen += msd_len;
1259 }
1260
1261 if (msdp->pkt) {
1262 packet->total_packets += msdp->pkt->total_packets;
1263 packet->total_bytes += msdp->pkt->total_bytes;
1264 }
1265
1266 if (msdp->skb)
1267 dev_consume_skb_any(msdp->skb);
1268
1269 if (xmit_more) {
1270 msdp->skb = skb;
1271 msdp->pkt = packet;
1272 msdp->count++;
1273 } else {
1274 cur_send = packet;
1275 msdp->skb = NULL;
1276 msdp->pkt = NULL;
1277 msdp->count = 0;
1278 }
1279 } else {
1280 move_pkt_msd(&msd_send, &msd_skb, msdp);
1281 cur_send = packet;
1282 }
1283
1284 if (msd_send) {
1285 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1286 NULL, msd_skb);
1287
1288 if (m_ret != 0) {
1289 netvsc_free_send_slot(net_device,
1290 msd_send->send_buf_index);
1291 dev_kfree_skb_any(msd_skb);
1292 }
1293 }
1294
1295 if (cur_send)
1296 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1297
1298 if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1299 netvsc_free_send_slot(net_device, section_index);
1300
1301 return ret;
1302 }
1303
1304 /* Send pending recv completions */
1305 static int send_recv_completions(struct net_device *ndev,
1306 struct netvsc_device *nvdev,
1307 struct netvsc_channel *nvchan)
1308 {
1309 struct multi_recv_comp *mrc = &nvchan->mrc;
1310 struct recv_comp_msg {
1311 struct nvsp_message_header hdr;
1312 u32 status;
1313 } __packed;
1314 struct recv_comp_msg msg = {
1315 .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1316 };
1317 int ret;
1318
1319 while (mrc->first != mrc->next) {
1320 const struct recv_comp_data *rcd
1321 = mrc->slots + mrc->first;
1322
1323 msg.status = rcd->status;
1324 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1325 rcd->tid, VM_PKT_COMP, 0);
1326 if (unlikely(ret)) {
1327 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1328
1329 ++ndev_ctx->eth_stats.rx_comp_busy;
1330 return ret;
1331 }
1332
1333 if (++mrc->first == nvdev->recv_completion_cnt)
1334 mrc->first = 0;
1335 }
1336
1337 /* receive completion ring has been emptied */
1338 if (unlikely(nvdev->destroy))
1339 wake_up(&nvdev->wait_drain);
1340
1341 return 0;
1342 }
1343
1344 /* Count how many receive completions are outstanding */
1345 static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1346 const struct multi_recv_comp *mrc,
1347 u32 *filled, u32 *avail)
1348 {
1349 u32 count = nvdev->recv_completion_cnt;
1350
1351 if (mrc->next >= mrc->first)
1352 *filled = mrc->next - mrc->first;
1353 else
1354 *filled = (count - mrc->first) + mrc->next;
1355
1356 *avail = count - *filled - 1;
1357 }
1358
1359 /* Add receive complete to ring to send to host. */
1360 static void enq_receive_complete(struct net_device *ndev,
1361 struct netvsc_device *nvdev, u16 q_idx,
1362 u64 tid, u32 status)
1363 {
1364 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1365 struct multi_recv_comp *mrc = &nvchan->mrc;
1366 struct recv_comp_data *rcd;
1367 u32 filled, avail;
1368
1369 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1370
1371 if (unlikely(filled > NAPI_POLL_WEIGHT)) {
1372 send_recv_completions(ndev, nvdev, nvchan);
1373 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1374 }
1375
1376 if (unlikely(!avail)) {
1377 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1378 q_idx, tid);
1379 return;
1380 }
1381
1382 rcd = mrc->slots + mrc->next;
1383 rcd->tid = tid;
1384 rcd->status = status;
1385
1386 if (++mrc->next == nvdev->recv_completion_cnt)
1387 mrc->next = 0;
1388 }
1389
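/* Handle a VM_PKT_DATA_USING_XFER_PAGES packet: validate the transfer
 * page header, pass each contained RNDIS packet (one Ethernet frame per
 * range) to rndis_filter_receive(), and queue a receive completion for
 * the host.
 */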
1390 static int netvsc_receive(struct net_device *ndev,
1391 struct netvsc_device *net_device,
1392 struct netvsc_channel *nvchan,
1393 const struct vmpacket_descriptor *desc)
1394 {
1395 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1396 struct vmbus_channel *channel = nvchan->channel;
1397 const struct vmtransfer_page_packet_header *vmxferpage_packet
1398 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1399 const struct nvsp_message *nvsp = hv_pkt_data(desc);
1400 u32 msglen = hv_pkt_datalen(desc);
1401 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1402 char *recv_buf = net_device->recv_buf;
1403 u32 status = NVSP_STAT_SUCCESS;
1404 int i;
1405 int count = 0;
1406
1407 /* Ensure packet is big enough to read header fields */
1408 if (msglen < sizeof(struct nvsp_message_header)) {
1409 netif_err(net_device_ctx, rx_err, ndev,
1410 "invalid nvsp header, length too small: %u\n",
1411 msglen);
1412 return 0;
1413 }
1414
1415 /* Make sure this is a valid nvsp packet */
1416 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1417 netif_err(net_device_ctx, rx_err, ndev,
1418 "Unknown nvsp packet type received %u\n",
1419 nvsp->hdr.msg_type);
1420 return 0;
1421 }
1422
1423 /* Validate xfer page pkt header */
1424 if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1425 netif_err(net_device_ctx, rx_err, ndev,
1426 "Invalid xfer page pkt, offset too small: %u\n",
1427 desc->offset8 << 3);
1428 return 0;
1429 }
1430
1431 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1432 netif_err(net_device_ctx, rx_err, ndev,
1433 "Invalid xfer page set id - expecting %x got %x\n",
1434 NETVSC_RECEIVE_BUFFER_ID,
1435 vmxferpage_packet->xfer_pageset_id);
1436 return 0;
1437 }
1438
1439 count = vmxferpage_packet->range_cnt;
1440
1441 /* Check count for a valid value */
1442 if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1443 netif_err(net_device_ctx, rx_err, ndev,
1444 "Range count is not valid: %d\n",
1445 count);
1446 return 0;
1447 }
1448
1449 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1450 for (i = 0; i < count; i++) {
1451 u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1452 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1453 void *data;
1454 int ret;
1455
1456 if (unlikely(offset > net_device->recv_buf_size ||
1457 buflen > net_device->recv_buf_size - offset)) {
1458 nvchan->rsc.cnt = 0;
1459 status = NVSP_STAT_FAIL;
1460 netif_err(net_device_ctx, rx_err, ndev,
1461 "Packet offset:%u + len:%u too big\n",
1462 offset, buflen);
1463
1464 continue;
1465 }
1466
1467 /* We're going to copy (sections of) the packet into nvchan->recv_buf;
1468 * make sure that nvchan->recv_buf is large enough to hold the packet.
1469 */
1470 if (unlikely(buflen > net_device->recv_section_size)) {
1471 nvchan->rsc.cnt = 0;
1472 status = NVSP_STAT_FAIL;
1473 netif_err(net_device_ctx, rx_err, ndev,
1474 "Packet too big: buflen=%u recv_section_size=%u\n",
1475 buflen, net_device->recv_section_size);
1476
1477 continue;
1478 }
1479
1480 data = recv_buf + offset;
1481
1482 nvchan->rsc.is_last = (i == count - 1);
1483
1484 trace_rndis_recv(ndev, q_idx, data);
1485
1486 /* Pass it to the upper layer */
1487 ret = rndis_filter_receive(ndev, net_device,
1488 nvchan, data, buflen);
1489
1490 if (unlikely(ret != NVSP_STAT_SUCCESS)) {
1491 /* Drop incomplete packet */
1492 nvchan->rsc.cnt = 0;
1493 status = NVSP_STAT_FAIL;
1494 }
1495 }
1496
1497 enq_receive_complete(ndev, net_device, q_idx,
1498 vmxferpage_packet->d.trans_id, status);
1499
1500 return count;
1501 }
1502
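/* Parse an NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message and copy the
 * send indirection table into tx_table, after validating the count and
 * working around the bad offset reported by some hosts.
 */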
1503 static void netvsc_send_table(struct net_device *ndev,
1504 struct netvsc_device *nvscdev,
1505 const struct nvsp_message *nvmsg,
1506 u32 msglen)
1507 {
1508 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1509 u32 count, offset, *tab;
1510 int i;
1511
1512 /* Ensure packet is big enough to read send_table fields */
1513 if (msglen < sizeof(struct nvsp_message_header) +
1514 sizeof(struct nvsp_5_send_indirect_table)) {
1515 netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
1516 return;
1517 }
1518
1519 count = nvmsg->msg.v5_msg.send_table.count;
1520 offset = nvmsg->msg.v5_msg.send_table.offset;
1521
1522 if (count != VRSS_SEND_TAB_SIZE) {
1523 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1524 return;
1525 }
1526
1527 /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
1528 * wrong due to a host bug. So fix the offset here.
1529 */
1530 if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1531 msglen >= sizeof(struct nvsp_message_header) +
1532 sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
1533 offset = sizeof(struct nvsp_message_header) +
1534 sizeof(union nvsp_6_message_uber);
1535
1536 /* Boundary check for all versions */
1537 if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
1538 netdev_err(ndev, "Received send-table offset too big:%u\n",
1539 offset);
1540 return;
1541 }
1542
1543 tab = (void *)nvmsg + offset;
1544
1545 for (i = 0; i < count; i++)
1546 net_device_ctx->tx_table[i] = tab[i];
1547 }
1548
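/* Handle an NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION message: record whether a
 * VF slot was added or removed along with its serial number, and wake up
 * anyone waiting for the VF to appear.
 */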
1549 static void netvsc_send_vf(struct net_device *ndev,
1550 const struct nvsp_message *nvmsg,
1551 u32 msglen)
1552 {
1553 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1554
1555 /* Ensure packet is big enough to read its fields */
1556 if (msglen < sizeof(struct nvsp_message_header) +
1557 sizeof(struct nvsp_4_send_vf_association)) {
1558 netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1559 return;
1560 }
1561
1562 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1563 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1564
1565 if (net_device_ctx->vf_alloc)
1566 complete(&net_device_ctx->vf_add);
1567
1568 netdev_info(ndev, "VF slot %u %s\n",
1569 net_device_ctx->vf_serial,
1570 net_device_ctx->vf_alloc ? "added" : "removed");
1571 }
1572
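/* Handle a VM_PKT_DATA_INBAND control message from the host: either a
 * send indirection table update or a VF association notification.
 */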
1573 static void netvsc_receive_inband(struct net_device *ndev,
1574 struct netvsc_device *nvscdev,
1575 const struct vmpacket_descriptor *desc)
1576 {
1577 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1578 u32 msglen = hv_pkt_datalen(desc);
1579
1580 /* Ensure packet is big enough to read header fields */
1581 if (msglen < sizeof(struct nvsp_message_header)) {
1582 netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
1583 return;
1584 }
1585
1586 switch (nvmsg->hdr.msg_type) {
1587 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1588 netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
1589 break;
1590
1591 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1592 if (hv_is_isolation_supported())
1593 netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
1594 else
1595 netvsc_send_vf(ndev, nvmsg, msglen);
1596 break;
1597 }
1598 }
1599
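/* Demultiplex one raw VMBus packet by type: send completions, receive
 * (transfer-page) data, or inband control messages. Only the receive
 * path contributes to the NAPI work budget.
 */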
1600 static int netvsc_process_raw_pkt(struct hv_device *device,
1601 struct netvsc_channel *nvchan,
1602 struct netvsc_device *net_device,
1603 struct net_device *ndev,
1604 const struct vmpacket_descriptor *desc,
1605 int budget)
1606 {
1607 struct vmbus_channel *channel = nvchan->channel;
1608 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1609
1610 trace_nvsp_recv(ndev, channel, nvmsg);
1611
1612 switch (desc->type) {
1613 case VM_PKT_COMP:
1614 netvsc_send_completion(ndev, net_device, channel, desc, budget);
1615 break;
1616
1617 case VM_PKT_DATA_USING_XFER_PAGES:
1618 return netvsc_receive(ndev, net_device, nvchan, desc);
1619
1620 case VM_PKT_DATA_INBAND:
1621 netvsc_receive_inband(ndev, net_device, desc);
1622 break;
1623
1624 default:
1625 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1626 desc->type, desc->trans_id);
1627 break;
1628 }
1629
1630 return 0;
1631 }
1632
1633 static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1634 {
1635 struct vmbus_channel *primary = channel->primary_channel;
1636
1637 return primary ? primary->device_obj : channel->device_obj;
1638 }
1639
1640 /* Network processing softirq
1641 * Process data in incoming ring buffer from host
1642 * Stops when ring is empty or budget is met or exceeded.
1643 */
1644 int netvsc_poll(struct napi_struct *napi, int budget)
1645 {
1646 struct netvsc_channel *nvchan
1647 = container_of(napi, struct netvsc_channel, napi);
1648 struct netvsc_device *net_device = nvchan->net_device;
1649 struct vmbus_channel *channel = nvchan->channel;
1650 struct hv_device *device = netvsc_channel_to_device(channel);
1651 struct net_device *ndev = hv_get_drvdata(device);
1652 int work_done = 0;
1653 int ret;
1654
1655 /* If starting a new interval */
1656 if (!nvchan->desc)
1657 nvchan->desc = hv_pkt_iter_first(channel);
1658
1659 nvchan->xdp_flush = false;
1660
1661 while (nvchan->desc && work_done < budget) {
1662 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1663 ndev, nvchan->desc, budget);
1664 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1665 }
1666
1667 if (nvchan->xdp_flush)
1668 xdp_do_flush();
1669
1670 /* Send any pending receive completions */
1671 ret = send_recv_completions(ndev, net_device, nvchan);
1672
1673 /* If we did not exhaust the NAPI budget this time
1674 * and are not busy polling,
1675 * then re-enable host interrupts
1676 * and reschedule if the ring is not empty
1677 * or sending a receive completion failed.
1678 */
1679 if (work_done < budget &&
1680 napi_complete_done(napi, work_done) &&
1681 (ret || hv_end_read(&channel->inbound)) &&
1682 napi_schedule_prep(napi)) {
1683 hv_begin_read(&channel->inbound);
1684 __napi_schedule(napi);
1685 }
1686
1687 /* Driver may overshoot since there can be multiple packets per descriptor */
1688 return min(work_done, budget);
1689 }
1690
1691 /* Call back when data is available in host ring buffer.
1692 * Processing is deferred until network softirq (NAPI)
1693 */
1694 void netvsc_channel_cb(void *context)
1695 {
1696 struct netvsc_channel *nvchan = context;
1697 struct vmbus_channel *channel = nvchan->channel;
1698 struct hv_ring_buffer_info *rbi = &channel->inbound;
1699
1700 /* preload first vmpacket descriptor */
1701 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1702
1703 if (napi_schedule_prep(&nvchan->napi)) {
1704 /* disable interrupts from host */
1705 hv_begin_read(rbi);
1706
1707 __napi_schedule_irqoff(&nvchan->napi);
1708 }
1709 }
1710
1711 /*
1712 * netvsc_device_add - Callback when the device belonging to this
1713 * driver is added
1714 */
1715 struct netvsc_device *netvsc_device_add(struct hv_device *device,
1716 const struct netvsc_device_info *device_info)
1717 {
1718 int i, ret = 0;
1719 struct netvsc_device *net_device;
1720 struct net_device *ndev = hv_get_drvdata(device);
1721 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1722
1723 net_device = alloc_net_device();
1724 if (!net_device)
1725 return ERR_PTR(-ENOMEM);
1726
1727 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1728 net_device_ctx->tx_table[i] = 0;
1729
1730 /* Because the device uses NAPI, all the interrupt batching and
1731 * control is done via the net softirq, not the channel callback.
1732 */
1733 set_channel_read_mode(device->channel, HV_CALL_ISR);
1734
1735 /* If we're reopening the device we may have multiple queues; fill the
1736 * chn_table with the default channel to use it before subchannels are
1737 * opened.
1738 * Initialize the channel state before we open;
1739 * we can be interrupted as soon as we open the channel.
1740 */
1741
1742 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1743 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1744
1745 nvchan->channel = device->channel;
1746 nvchan->net_device = net_device;
1747 u64_stats_init(&nvchan->tx_stats.syncp);
1748 u64_stats_init(&nvchan->rx_stats.syncp);
1749
1750 ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1751
1752 if (ret) {
1753 netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
1754 goto cleanup2;
1755 }
1756
1757 ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1758 MEM_TYPE_PAGE_SHARED, NULL);
1759
1760 if (ret) {
1761 netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
1762 goto cleanup2;
1763 }
1764 }
1765
1766 /* Enable NAPI handler before init callbacks */
1767 netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1768
1769 /* Open the channel */
1770 device->channel->next_request_id_callback = vmbus_next_request_id;
1771 device->channel->request_addr_callback = vmbus_request_addr;
1772 device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1773 device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1774
1775 ret = vmbus_open(device->channel, netvsc_ring_bytes,
1776 netvsc_ring_bytes, NULL, 0,
1777 netvsc_channel_cb, net_device->chan_table);
1778
1779 if (ret != 0) {
1780 netdev_err(ndev, "unable to open channel: %d\n", ret);
1781 goto cleanup;
1782 }
1783
1784 /* Channel is opened */
1785 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1786
1787 napi_enable(&net_device->chan_table[0].napi);
1788
1789 /* Connect with the NetVsp */
1790 ret = netvsc_connect_vsp(device, net_device, device_info);
1791 if (ret != 0) {
1792 netdev_err(ndev,
1793 "unable to connect to NetVSP - %d\n", ret);
1794 goto close;
1795 }
1796
1797 /* Writing the nvdev pointer unlocks netvsc_send(); make sure chn_table is
1798 * populated.
1799 */
1800 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1801
1802 return net_device;
1803
1804 close:
1805 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1806 napi_disable(&net_device->chan_table[0].napi);
1807
1808 /* Now, we can close the channel safely */
1809 vmbus_close(device->channel);
1810
1811 cleanup:
1812 netif_napi_del(&net_device->chan_table[0].napi);
1813
1814 cleanup2:
1815 free_netvsc_device(&net_device->rcu);
1816
1817 return ERR_PTR(ret);
1818 }
1819