xref: /openbmc/linux/drivers/net/hyperv/netvsc.c (revision 6b16f9ee)
195fa0405SHaiyang Zhang /*
295fa0405SHaiyang Zhang  * Copyright (c) 2009, Microsoft Corporation.
395fa0405SHaiyang Zhang  *
495fa0405SHaiyang Zhang  * This program is free software; you can redistribute it and/or modify it
595fa0405SHaiyang Zhang  * under the terms and conditions of the GNU General Public License,
695fa0405SHaiyang Zhang  * version 2, as published by the Free Software Foundation.
795fa0405SHaiyang Zhang  *
895fa0405SHaiyang Zhang  * This program is distributed in the hope it will be useful, but WITHOUT
995fa0405SHaiyang Zhang  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1095fa0405SHaiyang Zhang  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
1195fa0405SHaiyang Zhang  * more details.
1295fa0405SHaiyang Zhang  *
1395fa0405SHaiyang Zhang  * You should have received a copy of the GNU General Public License along with
14adf8d3ffSJeff Kirsher  * this program; if not, see <http://www.gnu.org/licenses/>.
1595fa0405SHaiyang Zhang  *
1695fa0405SHaiyang Zhang  * Authors:
1795fa0405SHaiyang Zhang  *   Haiyang Zhang <haiyangz@microsoft.com>
1895fa0405SHaiyang Zhang  *   Hank Janssen  <hjanssen@microsoft.com>
1995fa0405SHaiyang Zhang  */
2095fa0405SHaiyang Zhang #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2195fa0405SHaiyang Zhang 
2295fa0405SHaiyang Zhang #include <linux/kernel.h>
2395fa0405SHaiyang Zhang #include <linux/sched.h>
2495fa0405SHaiyang Zhang #include <linux/wait.h>
2595fa0405SHaiyang Zhang #include <linux/mm.h>
2695fa0405SHaiyang Zhang #include <linux/delay.h>
2795fa0405SHaiyang Zhang #include <linux/io.h>
2895fa0405SHaiyang Zhang #include <linux/slab.h>
2995fa0405SHaiyang Zhang #include <linux/netdevice.h>
30f157e78dSHaiyang Zhang #include <linux/if_ether.h>
31d6472302SStephen Rothwell #include <linux/vmalloc.h>
329749fed5Sstephen hemminger #include <linux/rtnetlink.h>
3343bf99ceSstephen hemminger #include <linux/prefetch.h>
349749fed5Sstephen hemminger 
35c25aaf81SKY Srinivasan #include <asm/sync_bitops.h>
3695fa0405SHaiyang Zhang 
3795fa0405SHaiyang Zhang #include "hyperv_net.h"
38ec966381SStephen Hemminger #include "netvsc_trace.h"
3995fa0405SHaiyang Zhang 
4084bf9cefSKY Srinivasan /*
4184bf9cefSKY Srinivasan  * Switch the data path from the synthetic interface to the VF
4284bf9cefSKY Srinivasan  * interface.
4384bf9cefSKY Srinivasan  */
440a1275caSVitaly Kuznetsov void netvsc_switch_datapath(struct net_device *ndev, bool vf)
4584bf9cefSKY Srinivasan {
463d541ac5SVitaly Kuznetsov 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
473d541ac5SVitaly Kuznetsov 	struct hv_device *dev = net_device_ctx->device_ctx;
4879e8cbe7Sstephen hemminger 	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
490a1275caSVitaly Kuznetsov 	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
5084bf9cefSKY Srinivasan 
5184bf9cefSKY Srinivasan 	memset(init_pkt, 0, sizeof(struct nvsp_message));
5284bf9cefSKY Srinivasan 	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
5384bf9cefSKY Srinivasan 	if (vf)
5484bf9cefSKY Srinivasan 		init_pkt->msg.v4_msg.active_dp.active_datapath =
5584bf9cefSKY Srinivasan 			NVSP_DATAPATH_VF;
5684bf9cefSKY Srinivasan 	else
5784bf9cefSKY Srinivasan 		init_pkt->msg.v4_msg.active_dp.active_datapath =
5884bf9cefSKY Srinivasan 			NVSP_DATAPATH_SYNTHETIC;
5984bf9cefSKY Srinivasan 
60ec966381SStephen Hemminger 	trace_nvsp_send(ndev, init_pkt);
61ec966381SStephen Hemminger 
6284bf9cefSKY Srinivasan 	vmbus_sendpacket(dev->channel, init_pkt,
6384bf9cefSKY Srinivasan 			       sizeof(struct nvsp_message),
6484bf9cefSKY Srinivasan 			       (unsigned long)init_pkt,
6584bf9cefSKY Srinivasan 			       VM_PKT_DATA_INBAND, 0);
6684bf9cefSKY Srinivasan }
6784bf9cefSKY Srinivasan 
/* Worker to setup sub channels on initial setup
 * Initial hotplug event occurs in softirq context
 * and can't wait for channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		/* Re-queue ourselves and retry later instead of blocking. */
		schedule_work(w);
		return;
	}

	/* extension is the RNDIS state; NULL means the device is being
	 * torn down, in which case there is nothing to configure.
	 */
	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel: drop the NAPI
			 * contexts registered for the never-opened subchannels
			 * and shrink the channel count to 1.
			 */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}
1023ffe64f1SStephen Hemminger 
10388098834SVitaly Kuznetsov static struct netvsc_device *alloc_net_device(void)
10495fa0405SHaiyang Zhang {
10595fa0405SHaiyang Zhang 	struct netvsc_device *net_device;
10695fa0405SHaiyang Zhang 
10795fa0405SHaiyang Zhang 	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
10895fa0405SHaiyang Zhang 	if (!net_device)
10995fa0405SHaiyang Zhang 		return NULL;
11095fa0405SHaiyang Zhang 
111dc5cd894SHaiyang Zhang 	init_waitqueue_head(&net_device->wait_drain);
11295fa0405SHaiyang Zhang 	net_device->destroy = false;
1130da6edbdSStephen Hemminger 
1147c3877f2SHaiyang Zhang 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
1157c3877f2SHaiyang Zhang 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
1168b532797Sstephen hemminger 
117fd612602SStephen Hemminger 	init_completion(&net_device->channel_init_wait);
118732e4985Sstephen hemminger 	init_waitqueue_head(&net_device->subchan_open);
1193ffe64f1SStephen Hemminger 	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
1207c3877f2SHaiyang Zhang 
12195fa0405SHaiyang Zhang 	return net_device;
12295fa0405SHaiyang Zhang }
12395fa0405SHaiyang Zhang 
124545a8e79Sstephen hemminger static void free_netvsc_device(struct rcu_head *head)
125f90251c8SHaiyang Zhang {
126545a8e79Sstephen hemminger 	struct netvsc_device *nvdev
127545a8e79Sstephen hemminger 		= container_of(head, struct netvsc_device, rcu);
128c0b558e5SHaiyang Zhang 	int i;
129c0b558e5SHaiyang Zhang 
13002400fceSStephen Hemminger 	kfree(nvdev->extension);
13102400fceSStephen Hemminger 	vfree(nvdev->recv_buf);
13202400fceSStephen Hemminger 	vfree(nvdev->send_buf);
13302400fceSStephen Hemminger 	kfree(nvdev->send_section_map);
13402400fceSStephen Hemminger 
135c0b558e5SHaiyang Zhang 	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
1367426b1a5Sstephen hemminger 		vfree(nvdev->chan_table[i].mrc.slots);
137c0b558e5SHaiyang Zhang 
138f90251c8SHaiyang Zhang 	kfree(nvdev);
139f90251c8SHaiyang Zhang }
140f90251c8SHaiyang Zhang 
/* Defer destruction of a netvsc_device past an RCU grace period; the
 * actual freeing happens in free_netvsc_device().
 */
static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}
14546b4f7f5Sstephen hemminger 
/* Tell the host to stop using the receive buffer, if it was granted.
 * No-op when setup never completed (recv_section_cnt only becomes
 * non-zero after a successful SendReceiveBufferComplete exchange).
 * A send failure on a rescinded channel is ignored so teardown can
 * still make progress.
 */
static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				"revoke receive buffer to netvsp\n");
			return;
		}
		/* Mark the buffer as no longer granted to the host. */
		net_device->recv_section_cnt = 0;
	}
}
1957992894cSMohammed Gamal 
/* Tell the host to stop using the send buffer, if it was granted.
 * Mirror of netvsc_revoke_recv_buf(): no-op when send_section_cnt is
 * zero, and a send failure on a rescinded channel is ignored.
 */
static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have setup.
	 * If we got a  send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		/* Mark the buffer as no longer granted to the host. */
		net_device->send_section_cnt = 0;
	}
}
2460cf73780SVitaly Kuznetsov 
2477992894cSMohammed Gamal static void netvsc_teardown_recv_gpadl(struct hv_device *device,
2483f076effSMohammed Gamal 				       struct netvsc_device *net_device,
2493f076effSMohammed Gamal 				       struct net_device *ndev)
2500cf73780SVitaly Kuznetsov {
2510cf73780SVitaly Kuznetsov 	int ret;
2520cf73780SVitaly Kuznetsov 
2530cf73780SVitaly Kuznetsov 	if (net_device->recv_buf_gpadl_handle) {
2540cf73780SVitaly Kuznetsov 		ret = vmbus_teardown_gpadl(device->channel,
2550cf73780SVitaly Kuznetsov 					   net_device->recv_buf_gpadl_handle);
2560cf73780SVitaly Kuznetsov 
2570cf73780SVitaly Kuznetsov 		/* If we failed here, we might as well return and have a leak
2580cf73780SVitaly Kuznetsov 		 * rather than continue and a bugchk
2590cf73780SVitaly Kuznetsov 		 */
2600cf73780SVitaly Kuznetsov 		if (ret != 0) {
2610cf73780SVitaly Kuznetsov 			netdev_err(ndev,
2620cf73780SVitaly Kuznetsov 				   "unable to teardown receive buffer's gpadl\n");
2630cf73780SVitaly Kuznetsov 			return;
2640cf73780SVitaly Kuznetsov 		}
2650cf73780SVitaly Kuznetsov 		net_device->recv_buf_gpadl_handle = 0;
2660cf73780SVitaly Kuznetsov 	}
2677992894cSMohammed Gamal }
2687992894cSMohammed Gamal 
2697992894cSMohammed Gamal static void netvsc_teardown_send_gpadl(struct hv_device *device,
2703f076effSMohammed Gamal 				       struct netvsc_device *net_device,
2713f076effSMohammed Gamal 				       struct net_device *ndev)
2727992894cSMohammed Gamal {
2737992894cSMohammed Gamal 	int ret;
2740cf73780SVitaly Kuznetsov 
275c25aaf81SKY Srinivasan 	if (net_device->send_buf_gpadl_handle) {
2763d541ac5SVitaly Kuznetsov 		ret = vmbus_teardown_gpadl(device->channel,
277c25aaf81SKY Srinivasan 					   net_device->send_buf_gpadl_handle);
278c25aaf81SKY Srinivasan 
279c25aaf81SKY Srinivasan 		/* If we failed here, we might as well return and have a leak
280c25aaf81SKY Srinivasan 		 * rather than continue and a bugchk
281c25aaf81SKY Srinivasan 		 */
282c25aaf81SKY Srinivasan 		if (ret != 0) {
283c25aaf81SKY Srinivasan 			netdev_err(ndev,
284c25aaf81SKY Srinivasan 				   "unable to teardown send buffer's gpadl\n");
2857a2a0a84SStephen Hemminger 			return;
286c25aaf81SKY Srinivasan 		}
2872f18423dSDave Jones 		net_device->send_buf_gpadl_handle = 0;
288c25aaf81SKY Srinivasan 	}
28995fa0405SHaiyang Zhang }
29095fa0405SHaiyang Zhang 
2917426b1a5Sstephen hemminger int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
2927426b1a5Sstephen hemminger {
2937426b1a5Sstephen hemminger 	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
2947426b1a5Sstephen hemminger 	int node = cpu_to_node(nvchan->channel->target_cpu);
2957426b1a5Sstephen hemminger 	size_t size;
2967426b1a5Sstephen hemminger 
2977426b1a5Sstephen hemminger 	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
2987426b1a5Sstephen hemminger 	nvchan->mrc.slots = vzalloc_node(size, node);
2997426b1a5Sstephen hemminger 	if (!nvchan->mrc.slots)
3007426b1a5Sstephen hemminger 		nvchan->mrc.slots = vzalloc(size);
3017426b1a5Sstephen hemminger 
3027426b1a5Sstephen hemminger 	return nvchan->mrc.slots ? 0 : -ENOMEM;
3037426b1a5Sstephen hemminger }
3047426b1a5Sstephen hemminger 
/* Allocate the receive and send buffers, share them with the host via
 * GPADLs, and run the NVSP buffer-setup handshake for each.  Sizes come
 * from device_info; on any failure everything granted so far is revoked
 * and torn down before returning a negative errno.
 */
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	/* The completion handler copies the host's reply into
	 * channel_init_pkt and signals channel_init_wait.
	 */
	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Setup receive completion ring */
	net_device->recv_completion_cnt
		= round_up(net_device->recv_section_cnt + 1,
			   PAGE_SIZE / sizeof(u64));
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer: one bit per send
	 * section, used to track which sections are in flight.
	 */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	/* Undo everything granted so far; each helper is a no-op for
	 * resources that were never set up.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}
49795fa0405SHaiyang Zhang 
/* Negotiate NVSP protocol version.
 * Offers exactly one version (min == max == nvsp_ver) and waits for the
 * host's INIT_COMPLETE reply.  For NVSPv2+ also sends the NDIS config
 * message advertising MTU and capabilities.  Returns 0 on success,
 * -EINVAL if the host rejects the version, or a vmbus send error.
 */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	/* Completion handler copies the reply into init_packet. */
	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	/* Receive segment coalescing, NVSP 6.1+ only. */
	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	/* Fire-and-forget: no completion is requested for NDIS config. */
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);

	return ret;
}
557f157e78dSHaiyang Zhang 
/* Negotiate the NVSP protocol with the host and complete device setup.
 *
 * Tries each supported NVSP version from newest to oldest until the host
 * accepts one, then sends the NDIS version message and sets up the
 * receive/send buffers via netvsc_init_buf().
 *
 * Returns 0 on success, -EPROTO if no NVSP version could be negotiated,
 * or a negative error from the VMBus send / buffer setup.
 */
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i])  == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	/* Loop fell through without a break: host rejected every version */
	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	/* NDIS version advertised depends on negotiated NVSP version:
	 * 6.1 for NVSP <= 4, 6.30 otherwise.
	 */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;


	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}
62095fa0405SHaiyang Zhang 
62195fa0405SHaiyang Zhang /*
62295fa0405SHaiyang Zhang  * netvsc_device_remove - Callback when the root bus device is removed
62395fa0405SHaiyang Zhang  */
/* Tear down the netvsc device: revoke host buffers, detach NAPI, close
 * the VMBus channel, and free the device via RCU.
 *
 * GPADL teardown ordering depends on host version: pre-Win2016 hosts
 * require teardown before vmbus_close(), Win2016+ after it.
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	/* Unpublish the device so new readers cannot find it */
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* And disassociate NAPI context from device */
	for (i = 0; i < net_device->num_chn; i++)
		netif_napi_del(&net_device->chan_table[i].napi);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}
67195fa0405SHaiyang Zhang 
67233be96e4SHaiyang Zhang #define RING_AVAIL_PERCENT_HIWATER 20
67333be96e4SHaiyang Zhang #define RING_AVAIL_PERCENT_LOWATER 10
67433be96e4SHaiyang Zhang 
/* Return a send-buffer section to the free pool by atomically flipping
 * its bit in the section bitmap (bit was set when the slot was claimed).
 */
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}
680c25aaf81SKY Srinivasan 
/* Handle a transmit-complete indication from the host for one RNDIS
 * data packet.
 *
 * Frees the send-buffer section (if one was used), accumulates TX
 * statistics, releases the skb, and either wakes the waiter draining
 * the device (on destroy) or restarts a stopped TX queue once the
 * ring has room again.
 */
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	/* trans_id carries the skb pointer set at send time; NULL for
	 * control packets.
	 */
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		/* Teardown path waits for all in-flight sends to drain */
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		/* Wake the queue once the outbound ring is above the
		 * high-water mark, or when nothing remains in flight.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}
730bc304dd3SStephen Hemminger 
/* Dispatch a send-completion message received from the host.
 *
 * Control-path completions (init, buffer setup, subchannel) are copied
 * into channel_init_pkt and the thread blocked on channel_init_wait is
 * signalled; RNDIS data-packet completions go to netvsc_send_tx_complete().
 */
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}
76195fa0405SHaiyang Zhang 
/* Find and claim a free send-buffer section.
 *
 * Scans the section bitmap for a clear bit and claims it atomically;
 * returns the claimed section index, or NETVSC_INVALID_INDEX when all
 * sections are in use.
 */
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		/* Another CPU may claim the bit between the scan and the
		 * test-and-set; only a 0->1 transition wins the slot.
		 */
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}
774c25aaf81SKY Srinivasan 
77526a11262SStephen Hemminger static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
776c25aaf81SKY Srinivasan 				    unsigned int section_index,
7777c3877f2SHaiyang Zhang 				    u32 pend_size,
77824476760SKY Srinivasan 				    struct hv_netvsc_packet *packet,
779a9f2e2d6SKY Srinivasan 				    struct rndis_message *rndis_msg,
78002b6de01Sstephen hemminger 				    struct hv_page_buffer *pb,
781cfd8afd9SStephen Hemminger 				    bool xmit_more)
782c25aaf81SKY Srinivasan {
783c25aaf81SKY Srinivasan 	char *start = net_device->send_buf;
7847c3877f2SHaiyang Zhang 	char *dest = start + (section_index * net_device->send_section_size)
7857c3877f2SHaiyang Zhang 		     + pend_size;
786c25aaf81SKY Srinivasan 	int i;
7877c3877f2SHaiyang Zhang 	u32 padding = 0;
788aa0a34beSHaiyang Zhang 	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
789aa0a34beSHaiyang Zhang 		packet->page_buf_cnt;
790b85e06f7SStephen Hemminger 	u32 remain;
7917c3877f2SHaiyang Zhang 
7927c3877f2SHaiyang Zhang 	/* Add padding */
793b85e06f7SStephen Hemminger 	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
794cfd8afd9SStephen Hemminger 	if (xmit_more && remain) {
7957c3877f2SHaiyang Zhang 		padding = net_device->pkt_align - remain;
79624476760SKY Srinivasan 		rndis_msg->msg_len += padding;
7977c3877f2SHaiyang Zhang 		packet->total_data_buflen += padding;
7987c3877f2SHaiyang Zhang 	}
799c25aaf81SKY Srinivasan 
800aa0a34beSHaiyang Zhang 	for (i = 0; i < page_count; i++) {
80102b6de01Sstephen hemminger 		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
80202b6de01Sstephen hemminger 		u32 offset = pb[i].offset;
80302b6de01Sstephen hemminger 		u32 len = pb[i].len;
804c25aaf81SKY Srinivasan 
805c25aaf81SKY Srinivasan 		memcpy(dest, (src + offset), len);
806c25aaf81SKY Srinivasan 		dest += len;
807c25aaf81SKY Srinivasan 	}
8087c3877f2SHaiyang Zhang 
80926a11262SStephen Hemminger 	if (padding)
8107c3877f2SHaiyang Zhang 		memset(dest, 0, padding);
811c25aaf81SKY Srinivasan }
812c25aaf81SKY Srinivasan 
/* Send one NVSP RNDIS packet on the VMBus channel for its queue.
 *
 * Builds the NVSP_MSG1_TYPE_SEND_RNDIS_PKT header, sends it inline or
 * with page buffers attached, and applies TX flow control: the queue is
 * stopped when the outbound ring drops below the low-water mark, and
 * -EAGAIN from VMBus is converted to -ENOSPC when nothing is in flight
 * to generate a completion that would restart the queue.
 *
 * Returns 0 on success or a negative errno.
 */
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	/* Sample ring fill level before the send for the low-water check */
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	/* The skb pointer doubles as the transaction id echoed back in
	 * the completion (see netvsc_send_tx_complete()).
	 */
	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	if (packet->page_buf_cnt) {
		/* cp_partial: RNDIS header pages were already copied into
		 * the send buffer, so skip past them.
		 */
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
		/* No completions outstanding to wake the queue later, so
		 * wake it now and report the ring as full.
		 */
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	return ret;
}
8917c3877f2SHaiyang Zhang 
892c85e4924SHaiyang Zhang /* Move packet out of multi send data (msd), and clear msd */
893c85e4924SHaiyang Zhang static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
894c85e4924SHaiyang Zhang 				struct sk_buff **msd_skb,
895c85e4924SHaiyang Zhang 				struct multi_send_data *msdp)
896c85e4924SHaiyang Zhang {
897c85e4924SHaiyang Zhang 	*msd_skb = msdp->skb;
898c85e4924SHaiyang Zhang 	*msd_send = msdp->pkt;
899c85e4924SHaiyang Zhang 	msdp->skb = NULL;
900c85e4924SHaiyang Zhang 	msdp->pkt = NULL;
901c85e4924SHaiyang Zhang 	msdp->count = 0;
902c85e4924SHaiyang Zhang }
903c85e4924SHaiyang Zhang 
/* RCU already held by caller */
/* Main transmit entry: batch small packets into a shared send-buffer
 * section when possible, otherwise send directly.
 *
 * A data packet either joins the current multi-send batch (same section
 * as the pending packet), claims a new section, or - if too large for
 * any section - is sent with page buffers only. When the stack signals
 * xmit_more, the batched packet is held back; otherwise the batch and/or
 * the current packet are pushed out via netvsc_send_pkt().
 *
 * Returns 0 on success or a negative errno.
 */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch =  msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		/* Whole packet fits after the pending batch: share section */
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		/* Only the RNDIS message fits: partial copy, the rest of
		 * the packet goes out as page buffers.
		 */
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		/* Start a new batch in a fresh section */
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if stack says more data is coming
	 * and not doing mixed modes send and not flow blocked
	 */
	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			/* RNDIS header pages were copied; the data pages
			 * will be attached to the VMBus packet instead.
			 */
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		/* Fold the batched packet's accounting into this one */
		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			/* Hold this packet as the new pending batch */
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		/* No section available: flush any batch and send as-is */
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
102895fa0405SHaiyang Zhang 
/* Send pending recv completions
 *
 * Drains the per-channel receive-completion ring, sending one
 * VM_PKT_COMP message to the host per queued entry. Stops early and
 * returns the error if the VMBus ring is busy (the entry stays queued
 * for a retry). Wakes the teardown waiter once the ring is empty and
 * the device is being destroyed. Returns 0 when fully drained.
 */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	}  __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		/* Advance consumer index, wrapping at ring size */
		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}
10687426b1a5Sstephen hemminger 
10697426b1a5Sstephen hemminger /* Count how many receive completions are outstanding */
10707426b1a5Sstephen hemminger static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
10717426b1a5Sstephen hemminger 				 const struct multi_recv_comp *mrc,
1072c0b558e5SHaiyang Zhang 				 u32 *filled, u32 *avail)
1073c0b558e5SHaiyang Zhang {
10747426b1a5Sstephen hemminger 	u32 count = nvdev->recv_completion_cnt;
1075c0b558e5SHaiyang Zhang 
10767426b1a5Sstephen hemminger 	if (mrc->next >= mrc->first)
10777426b1a5Sstephen hemminger 		*filled = mrc->next - mrc->first;
10787426b1a5Sstephen hemminger 	else
10797426b1a5Sstephen hemminger 		*filled = (count - mrc->first) + mrc->next;
1080c0b558e5SHaiyang Zhang 
10817426b1a5Sstephen hemminger 	*avail = count - *filled - 1;
108295fa0405SHaiyang Zhang }
1083c0b558e5SHaiyang Zhang 
/* Add receive complete to ring to send to host.
 *
 * Queues a (tid, status) completion for queue @q_idx in the per-channel
 * ring. If the ring has accumulated more than NAPI_POLL_WEIGHT entries,
 * tries to drain it first; if no slot is free even then, the completion
 * is dropped with an error message.
 */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	/* Ring getting long: push completions to the host and re-check */
	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	/* Advance producer index, wrapping at ring size */
	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}
111495fa0405SHaiyang Zhang 
111515a863bfSstephen hemminger static int netvsc_receive(struct net_device *ndev,
1116dc54a08cSstephen hemminger 			  struct netvsc_device *net_device,
1117c8e4eff4SHaiyang Zhang 			  struct netvsc_channel *nvchan,
1118f3dd3f47Sstephen hemminger 			  const struct vmpacket_descriptor *desc,
1119c347b927SStephen Hemminger 			  const struct nvsp_message *nvsp)
112095fa0405SHaiyang Zhang {
1121c347b927SStephen Hemminger 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1122c8e4eff4SHaiyang Zhang 	struct vmbus_channel *channel = nvchan->channel;
1123f3dd3f47Sstephen hemminger 	const struct vmtransfer_page_packet_header *vmxferpage_packet
1124f3dd3f47Sstephen hemminger 		= container_of(desc, const struct vmtransfer_page_packet_header, d);
112515a863bfSstephen hemminger 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
1126dc54a08cSstephen hemminger 	char *recv_buf = net_device->recv_buf;
11274baab261SHaiyang Zhang 	u32 status = NVSP_STAT_SUCCESS;
112845326342SHaiyang Zhang 	int i;
112945326342SHaiyang Zhang 	int count = 0;
113095fa0405SHaiyang Zhang 
113195fa0405SHaiyang Zhang 	/* Make sure this is a valid nvsp packet */
1132dc54a08cSstephen hemminger 	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1133dc54a08cSstephen hemminger 		netif_err(net_device_ctx, rx_err, ndev,
1134dc54a08cSstephen hemminger 			  "Unknown nvsp packet type received %u\n",
1135dc54a08cSstephen hemminger 			  nvsp->hdr.msg_type);
113615a863bfSstephen hemminger 		return 0;
113795fa0405SHaiyang Zhang 	}
113895fa0405SHaiyang Zhang 
1139dc54a08cSstephen hemminger 	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1140dc54a08cSstephen hemminger 		netif_err(net_device_ctx, rx_err, ndev,
1141dc54a08cSstephen hemminger 			  "Invalid xfer page set id - expecting %x got %x\n",
1142dc54a08cSstephen hemminger 			  NETVSC_RECEIVE_BUFFER_ID,
114395fa0405SHaiyang Zhang 			  vmxferpage_packet->xfer_pageset_id);
114415a863bfSstephen hemminger 		return 0;
114595fa0405SHaiyang Zhang 	}
114695fa0405SHaiyang Zhang 
11474baab261SHaiyang Zhang 	count = vmxferpage_packet->range_cnt;
114895fa0405SHaiyang Zhang 
114995fa0405SHaiyang Zhang 	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
11504baab261SHaiyang Zhang 	for (i = 0; i < count; i++) {
1151c5d24bddSHaiyang Zhang 		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1152dc54a08cSstephen hemminger 		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1153c5d24bddSHaiyang Zhang 		void *data;
11545c71dadbSHaiyang Zhang 		int ret;
115595fa0405SHaiyang Zhang 
1156c5d24bddSHaiyang Zhang 		if (unlikely(offset + buflen > net_device->recv_buf_size)) {
1157c8e4eff4SHaiyang Zhang 			nvchan->rsc.cnt = 0;
1158c5d24bddSHaiyang Zhang 			status = NVSP_STAT_FAIL;
1159c5d24bddSHaiyang Zhang 			netif_err(net_device_ctx, rx_err, ndev,
1160c5d24bddSHaiyang Zhang 				  "Packet offset:%u + len:%u too big\n",
1161c5d24bddSHaiyang Zhang 				  offset, buflen);
1162c5d24bddSHaiyang Zhang 
1163c5d24bddSHaiyang Zhang 			continue;
1164c5d24bddSHaiyang Zhang 		}
1165c5d24bddSHaiyang Zhang 
1166c5d24bddSHaiyang Zhang 		data = recv_buf + offset;
1167c5d24bddSHaiyang Zhang 
1168c8e4eff4SHaiyang Zhang 		nvchan->rsc.is_last = (i == count - 1);
1169c8e4eff4SHaiyang Zhang 
1170ec966381SStephen Hemminger 		trace_rndis_recv(ndev, q_idx, data);
1171ec966381SStephen Hemminger 
117295fa0405SHaiyang Zhang 		/* Pass it to the upper layer */
11735c71dadbSHaiyang Zhang 		ret = rndis_filter_receive(ndev, net_device,
1174c8e4eff4SHaiyang Zhang 					   nvchan, data, buflen);
11755c71dadbSHaiyang Zhang 
11765c71dadbSHaiyang Zhang 		if (unlikely(ret != NVSP_STAT_SUCCESS))
11775c71dadbSHaiyang Zhang 			status = NVSP_STAT_FAIL;
117895fa0405SHaiyang Zhang 	}
117995fa0405SHaiyang Zhang 
11807426b1a5Sstephen hemminger 	enq_receive_complete(ndev, net_device, q_idx,
11817426b1a5Sstephen hemminger 			     vmxferpage_packet->d.trans_id, status);
118215a863bfSstephen hemminger 
118315a863bfSstephen hemminger 	return count;
118495fa0405SHaiyang Zhang }
118595fa0405SHaiyang Zhang 
/* Handle NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE from the host: copy the
 * VRSS send indirection table into the per-device tx_table used for
 * transmit queue selection.
 *
 * NOTE(review): send_table.offset is host-controlled and is used for
 * pointer arithmetic below without being validated against the actual
 * packet length (which is not available through this signature) --
 * TODO confirm whether a length-bounded check should be added.
 */
static void netvsc_send_table(struct net_device *ndev,
			      const struct nvsp_message *nvmsg)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, *tab;
	int i;

	/* Only a full-size table is acceptable */
	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* Table entries start 'offset' bytes from the send_table header */
	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}
12055b54dac8SHaiyang Zhang 
1206c347b927SStephen Hemminger static void netvsc_send_vf(struct net_device *ndev,
1207c347b927SStephen Hemminger 			   const struct nvsp_message *nvmsg)
120871790a27SHaiyang Zhang {
1209c347b927SStephen Hemminger 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1210c347b927SStephen Hemminger 
1211f9a7da91SVitaly Kuznetsov 	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1212f9a7da91SVitaly Kuznetsov 	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
121300d7ddbaSStephen Hemminger 	netdev_info(ndev, "VF slot %u %s\n",
121400d7ddbaSStephen Hemminger 		    net_device_ctx->vf_serial,
121500d7ddbaSStephen Hemminger 		    net_device_ctx->vf_alloc ? "added" : "removed");
121671790a27SHaiyang Zhang }
121771790a27SHaiyang Zhang 
1218c347b927SStephen Hemminger static  void netvsc_receive_inband(struct net_device *ndev,
1219c347b927SStephen Hemminger 				   const struct nvsp_message *nvmsg)
122071790a27SHaiyang Zhang {
122171790a27SHaiyang Zhang 	switch (nvmsg->hdr.msg_type) {
122271790a27SHaiyang Zhang 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1223c347b927SStephen Hemminger 		netvsc_send_table(ndev, nvmsg);
122471790a27SHaiyang Zhang 		break;
122571790a27SHaiyang Zhang 
122671790a27SHaiyang Zhang 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1227c347b927SStephen Hemminger 		netvsc_send_vf(ndev, nvmsg);
122871790a27SHaiyang Zhang 		break;
122971790a27SHaiyang Zhang 	}
123071790a27SHaiyang Zhang }
123171790a27SHaiyang Zhang 
123215a863bfSstephen hemminger static int netvsc_process_raw_pkt(struct hv_device *device,
1233c8e4eff4SHaiyang Zhang 				  struct netvsc_channel *nvchan,
123499a50bb1SK. Y. Srinivasan 				  struct netvsc_device *net_device,
123599a50bb1SK. Y. Srinivasan 				  struct net_device *ndev,
1236f9645430Sstephen hemminger 				  const struct vmpacket_descriptor *desc,
1237f9645430Sstephen hemminger 				  int budget)
123899a50bb1SK. Y. Srinivasan {
1239c8e4eff4SHaiyang Zhang 	struct vmbus_channel *channel = nvchan->channel;
1240c347b927SStephen Hemminger 	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
124199a50bb1SK. Y. Srinivasan 
1242ec966381SStephen Hemminger 	trace_nvsp_recv(ndev, channel, nvmsg);
1243ec966381SStephen Hemminger 
124499a50bb1SK. Y. Srinivasan 	switch (desc->type) {
124599a50bb1SK. Y. Srinivasan 	case VM_PKT_COMP:
1246c347b927SStephen Hemminger 		netvsc_send_completion(ndev, net_device, channel,
1247f9645430Sstephen hemminger 				       desc, budget);
124899a50bb1SK. Y. Srinivasan 		break;
124999a50bb1SK. Y. Srinivasan 
125099a50bb1SK. Y. Srinivasan 	case VM_PKT_DATA_USING_XFER_PAGES:
1251c8e4eff4SHaiyang Zhang 		return netvsc_receive(ndev, net_device, nvchan,
1252c347b927SStephen Hemminger 				      desc, nvmsg);
125399a50bb1SK. Y. Srinivasan 		break;
125499a50bb1SK. Y. Srinivasan 
125599a50bb1SK. Y. Srinivasan 	case VM_PKT_DATA_INBAND:
1256c347b927SStephen Hemminger 		netvsc_receive_inband(ndev, nvmsg);
125799a50bb1SK. Y. Srinivasan 		break;
125899a50bb1SK. Y. Srinivasan 
125999a50bb1SK. Y. Srinivasan 	default:
126099a50bb1SK. Y. Srinivasan 		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1261f4f1c23dSstephen hemminger 			   desc->type, desc->trans_id);
126299a50bb1SK. Y. Srinivasan 		break;
126399a50bb1SK. Y. Srinivasan 	}
126415a863bfSstephen hemminger 
126515a863bfSstephen hemminger 	return 0;
126615a863bfSstephen hemminger }
126715a863bfSstephen hemminger 
126815a863bfSstephen hemminger static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
126915a863bfSstephen hemminger {
127015a863bfSstephen hemminger 	struct vmbus_channel *primary = channel->primary_channel;
127115a863bfSstephen hemminger 
127215a863bfSstephen hemminger 	return primary ? primary->device_obj : channel->device_obj;
127315a863bfSstephen hemminger }
127415a863bfSstephen hemminger 
/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval; nvchan->desc persists across poll
	 * rounds so a partially-drained ring resumes where it left off.
	 */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If it did not exhaust NAPI budget this time
	 *  and not doing busy poll
	 * then re-enable host interrupts
	 *  and reschedule if ring is not empty
	 *   or sending receive completion failed.
	 *
	 * The order matters: hv_end_read() re-arms the host interrupt and
	 * reports whether more data raced in; if so (or if completions
	 * failed, ret != 0) we must re-schedule ourselves rather than
	 * rely on another host interrupt arriving.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}
132099a50bb1SK. Y. Srinivasan 
/* Call back when data is available in host ring buffer.
 * Processing is deferred until network softirq (NAPI)
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor to warm the cache before
	 * netvsc_poll() starts iterating the ring
	 */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host before scheduling NAPI so
		 * the host does not keep signalling while we poll
		 */
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}
134095fa0405SHaiyang Zhang 
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 *
 * Allocates the netvsc_device, opens the primary VMBus channel, connects
 * to the NetVSP protocol on the host and finally publishes the device via
 * RCU.  Returns the new netvsc_device, or an ERR_PTR on failure.
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	/* Reset the send indirection table until the host sends a new one */
	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes,  NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}
1428