1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * Authors:
17  *   Haiyang Zhang <haiyangz@microsoft.com>
18  *   Hank Janssen  <hjanssen@microsoft.com>
19  */
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/wait.h>
23 #include <linux/highmem.h>
24 #include <linux/slab.h>
25 #include <linux/io.h>
26 #include <linux/if_ether.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_vlan.h>
29 #include <linux/nls.h>
30 #include <linux/vmalloc.h>
31 #include <linux/rtnetlink.h>
32 
33 #include "hyperv_net.h"
34 #include "netvsc_trace.h"
35 
36 static void rndis_set_multicast(struct work_struct *w);
37 
38 #define RNDIS_EXT_LEN PAGE_SIZE
39 struct rndis_request {
40 	struct list_head list_ent;
41 	struct completion  wait_event;
42 
43 	struct rndis_message response_msg;
44 	/*
45 	 * The buffer for extended info after the RNDIS response message. It's
46 	 * referenced based on the data offset in the RNDIS message. Its size
47 	 * is enough for current needs, and should be sufficient for the near
48 	 * future.
49 	 */
50 	u8 response_ext[RNDIS_EXT_LEN];
51 
52 	/* Simplify allocation by having a netvsc packet inline */
53 	struct hv_netvsc_packet	pkt;
54 
55 	struct rndis_message request_msg;
56 	/*
57 	 * The buffer for the extended info after the RNDIS request message.
58 	 * It is referenced and sized in a similar way as response_ext.
59 	 */
60 	u8 request_ext[RNDIS_EXT_LEN];
61 };
62 
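/*
 * Default 40-byte Toeplitz hash key programmed into the host's RSS engine
 * when sub-channels are set up.
 */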
63 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
64 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
65 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
66 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
67 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
68 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
69 };
70 
71 static struct rndis_device *get_rndis_device(void)
72 {
73 	struct rndis_device *device;
74 
75 	device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
76 	if (!device)
77 		return NULL;
78 
79 	spin_lock_init(&device->request_lock);
80 
81 	INIT_LIST_HEAD(&device->req_list);
82 	INIT_WORK(&device->mcast_work, rndis_set_multicast);
83 
84 	device->state = RNDIS_DEV_UNINITIALIZED;
85 
86 	return device;
87 }
88 
89 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
90 					     u32 msg_type,
91 					     u32 msg_len)
92 {
93 	struct rndis_request *request;
94 	struct rndis_message *rndis_msg;
95 	struct rndis_set_request *set;
96 	unsigned long flags;
97 
98 	request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
99 	if (!request)
100 		return NULL;
101 
102 	init_completion(&request->wait_event);
103 
104 	rndis_msg = &request->request_msg;
105 	rndis_msg->ndis_msg_type = msg_type;
106 	rndis_msg->msg_len = msg_len;
107 
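	/* Control messages are always sent on the primary (queue 0) channel. */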
108 	request->pkt.q_idx = 0;
109 
	/*
	 * Set the request id. This field is always after the rndis header for
	 * request/response packet types, so we just use the SetRequest as a
	 * template.
	 */
115 	set = &rndis_msg->msg.set_req;
116 	set->req_id = atomic_inc_return(&dev->new_req_id);
117 
118 	/* Add to the request list */
119 	spin_lock_irqsave(&dev->request_lock, flags);
120 	list_add_tail(&request->list_ent, &dev->req_list);
121 	spin_unlock_irqrestore(&dev->request_lock, flags);
122 
123 	return request;
124 }
125 
126 static void put_rndis_request(struct rndis_device *dev,
127 			    struct rndis_request *req)
128 {
129 	unsigned long flags;
130 
131 	spin_lock_irqsave(&dev->request_lock, flags);
132 	list_del(&req->list_ent);
133 	spin_unlock_irqrestore(&dev->request_lock, flags);
134 
135 	kfree(req);
136 }
137 
138 static void dump_rndis_message(struct net_device *netdev,
139 			       const struct rndis_message *rndis_msg)
140 {
141 	switch (rndis_msg->ndis_msg_type) {
142 	case RNDIS_MSG_PACKET:
		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
			   "data offset %u, data len %u, # oob %u, "
			   "oob offset %u, oob len %u, pkt offset %u, "
			   "pkt len %u)\n",
147 			   rndis_msg->msg_len,
148 			   rndis_msg->msg.pkt.data_offset,
149 			   rndis_msg->msg.pkt.data_len,
150 			   rndis_msg->msg.pkt.num_oob_data_elements,
151 			   rndis_msg->msg.pkt.oob_data_offset,
152 			   rndis_msg->msg.pkt.oob_data_len,
153 			   rndis_msg->msg.pkt.per_pkt_info_offset,
154 			   rndis_msg->msg.pkt.per_pkt_info_len);
155 		break;
156 
157 	case RNDIS_MSG_INIT_C:
158 		netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
159 			"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
160 			"device flags %d, max xfer size 0x%x, max pkts %u, "
161 			"pkt aligned %u)\n",
162 			rndis_msg->msg_len,
163 			rndis_msg->msg.init_complete.req_id,
164 			rndis_msg->msg.init_complete.status,
165 			rndis_msg->msg.init_complete.major_ver,
166 			rndis_msg->msg.init_complete.minor_ver,
167 			rndis_msg->msg.init_complete.dev_flags,
168 			rndis_msg->msg.init_complete.max_xfer_size,
			rndis_msg->msg.init_complete.max_pkt_per_msg,
			rndis_msg->msg.init_complete.pkt_alignment_factor);
173 		break;
174 
175 	case RNDIS_MSG_QUERY_C:
176 		netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
177 			"(len %u, id 0x%x, status 0x%x, buf len %u, "
178 			"buf offset %u)\n",
179 			rndis_msg->msg_len,
180 			rndis_msg->msg.query_complete.req_id,
181 			rndis_msg->msg.query_complete.status,
			rndis_msg->msg.query_complete.info_buflen,
			rndis_msg->msg.query_complete.info_buf_offset);
186 		break;
187 
188 	case RNDIS_MSG_SET_C:
189 		netdev_dbg(netdev,
190 			"RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
191 			rndis_msg->msg_len,
192 			rndis_msg->msg.set_complete.req_id,
193 			rndis_msg->msg.set_complete.status);
194 		break;
195 
196 	case RNDIS_MSG_INDICATE:
197 		netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
198 			"(len %u, status 0x%x, buf len %u, buf offset %u)\n",
199 			rndis_msg->msg_len,
200 			rndis_msg->msg.indicate_status.status,
201 			rndis_msg->msg.indicate_status.status_buflen,
202 			rndis_msg->msg.indicate_status.status_buf_offset);
203 		break;
204 
205 	default:
206 		netdev_dbg(netdev, "0x%x (len %u)\n",
207 			rndis_msg->ndis_msg_type,
208 			rndis_msg->msg_len);
209 		break;
210 	}
211 }
212 
213 static int rndis_filter_send_request(struct rndis_device *dev,
214 				  struct rndis_request *req)
215 {
216 	struct hv_netvsc_packet *packet;
217 	struct hv_page_buffer page_buf[2];
218 	struct hv_page_buffer *pb = page_buf;
219 	int ret;
220 
221 	/* Setup the packet to send it */
222 	packet = &req->pkt;
223 
224 	packet->total_data_buflen = req->request_msg.msg_len;
225 	packet->page_buf_cnt = 1;
226 
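	/*
	 * Describe the physical location of the request message as a page
	 * buffer list; the message lives inside the kmalloc()'d rndis_request,
	 * so it may straddle a page boundary (handled below).
	 */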
227 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
228 					PAGE_SHIFT;
229 	pb[0].len = req->request_msg.msg_len;
230 	pb[0].offset =
231 		(unsigned long)&req->request_msg & (PAGE_SIZE - 1);
232 
	/* Add another page_buf when the request_msg crosses a page boundary */
234 	if (pb[0].offset + pb[0].len > PAGE_SIZE) {
235 		packet->page_buf_cnt++;
236 		pb[0].len = PAGE_SIZE -
237 			pb[0].offset;
238 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
239 			+ pb[0].len) >> PAGE_SHIFT;
240 		pb[1].offset = 0;
241 		pb[1].len = req->request_msg.msg_len -
242 			pb[0].len;
243 	}
244 
245 	trace_rndis_send(dev->ndev, 0, &req->request_msg);
246 
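	/*
	 * netvsc_send() looks up the netvsc_device under RCU, so hold the
	 * BH-safe read lock across the call.
	 */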
247 	rcu_read_lock_bh();
248 	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
249 	rcu_read_unlock_bh();
250 
251 	return ret;
252 }
253 
254 static void rndis_set_link_state(struct rndis_device *rdev,
255 				 struct rndis_request *request)
256 {
257 	u32 link_status;
258 	struct rndis_query_complete *query_complete;
259 
260 	query_complete = &request->response_msg.msg.query_complete;
261 
262 	if (query_complete->status == RNDIS_STATUS_SUCCESS &&
263 	    query_complete->info_buflen == sizeof(u32)) {
264 		memcpy(&link_status, (void *)((unsigned long)query_complete +
265 		       query_complete->info_buf_offset), sizeof(u32));
266 		rdev->link_state = link_status != 0;
267 	}
268 }
269 
270 static void rndis_filter_receive_response(struct net_device *ndev,
271 					  struct netvsc_device *nvdev,
272 					  const struct rndis_message *resp)
273 {
274 	struct rndis_device *dev = nvdev->extension;
275 	struct rndis_request *request = NULL;
276 	bool found = false;
277 	unsigned long flags;
278 
	/* This should never happen; it means a control message
	 * response was received after the device was removed.
	 */
	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
		netdev_err(ndev,
			   "got rndis message while device is uninitialized\n");
285 		return;
286 	}
287 
288 	spin_lock_irqsave(&dev->request_lock, flags);
289 	list_for_each_entry(request, &dev->req_list, list_ent) {
		/*
		 * All request/response messages contain the RequestId as the
		 * first field.
		 */
294 		if (request->request_msg.msg.init_req.req_id
295 		    == resp->msg.init_complete.req_id) {
296 			found = true;
297 			break;
298 		}
299 	}
300 	spin_unlock_irqrestore(&dev->request_lock, flags);
301 
302 	if (found) {
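		/*
		 * Note: the copy below may extend past response_msg into
		 * response_ext, which is laid out immediately after it in
		 * struct rndis_request for exactly this purpose.
		 */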
303 		if (resp->msg_len <=
304 		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
305 			memcpy(&request->response_msg, resp,
306 			       resp->msg_len);
307 			if (request->request_msg.ndis_msg_type ==
308 			    RNDIS_MSG_QUERY && request->request_msg.msg.
309 			    query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
310 				rndis_set_link_state(dev, request);
311 		} else {
312 			netdev_err(ndev,
313 				"rndis response buffer overflow "
314 				"detected (size %u max %zu)\n",
315 				resp->msg_len,
				(size_t)(sizeof(struct rndis_message) +
					 RNDIS_EXT_LEN));
317 
318 			if (resp->ndis_msg_type ==
319 			    RNDIS_MSG_RESET_C) {
320 				/* does not have a request id field */
321 				request->response_msg.msg.reset_complete.
322 					status = RNDIS_STATUS_BUFFER_OVERFLOW;
323 			} else {
324 				request->response_msg.msg.
325 				init_complete.status =
326 					RNDIS_STATUS_BUFFER_OVERFLOW;
327 			}
328 		}
329 
330 		complete(&request->wait_event);
331 	} else {
332 		netdev_err(ndev,
333 			"no rndis request found for this response "
334 			"(id 0x%x res type 0x%x)\n",
335 			resp->msg.init_complete.req_id,
336 			resp->ndis_msg_type);
337 	}
338 }
339 
340 /*
341  * Get the Per-Packet-Info with the specified type
342  * return NULL if not found.
343  */
344 static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
345 {
346 	struct rndis_per_packet_info *ppi;
347 	int len;
348 
349 	if (rpkt->per_pkt_info_offset == 0)
350 		return NULL;
351 
352 	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
353 		rpkt->per_pkt_info_offset);
354 	len = rpkt->per_pkt_info_len;
355 
356 	while (len > 0) {
357 		if (ppi->type == type)
358 			return (void *)((ulong)ppi + ppi->ppi_offset);
359 		len -= ppi->size;
360 		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
361 	}
362 
363 	return NULL;
364 }
365 
366 static int rndis_filter_receive_data(struct net_device *ndev,
367 				     struct netvsc_device *nvdev,
368 				     struct vmbus_channel *channel,
369 				     struct rndis_message *msg,
370 				     u32 data_buflen)
371 {
372 	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
373 	const struct ndis_tcp_ip_checksum_info *csum_info;
374 	const struct ndis_pkt_8021q_info *vlan;
375 	u32 data_offset;
376 	void *data;
377 
378 	/* Remove the rndis header and pass it back up the stack */
379 	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
380 
381 	data_buflen -= data_offset;
382 
	/*
	 * Make sure we got a valid RNDIS message: the remaining buffer length
	 * (payload plus trailer padding) must be at least the advertised
	 * data length.
	 */
387 	if (unlikely(data_buflen < rndis_pkt->data_len)) {
388 		netdev_err(ndev, "rndis message buffer "
389 			   "overflow detected (got %u, min %u)"
390 			   "...dropping this message!\n",
391 			   data_buflen, rndis_pkt->data_len);
392 		return NVSP_STAT_FAIL;
393 	}
394 
395 	vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
396 
397 	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
398 
399 	data = (void *)msg + data_offset;
400 
	/*
	 * Strip the rndis trailer padding: rndis_pkt->data_len tells us the
	 * real data length, so only that many bytes are passed up the stack,
	 * without the trailer padding.
	 */
406 	return netvsc_recv_callback(ndev, nvdev, channel,
407 				    data, rndis_pkt->data_len,
408 				    csum_info, vlan);
409 }
410 
411 int rndis_filter_receive(struct net_device *ndev,
412 			 struct netvsc_device *net_dev,
413 			 struct vmbus_channel *channel,
414 			 void *data, u32 buflen)
415 {
416 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
417 	struct rndis_message *rndis_msg = data;
418 
419 	if (netif_msg_rx_status(net_device_ctx))
420 		dump_rndis_message(ndev, rndis_msg);
421 
422 	switch (rndis_msg->ndis_msg_type) {
423 	case RNDIS_MSG_PACKET:
424 		return rndis_filter_receive_data(ndev, net_dev, channel,
425 						 rndis_msg, buflen);
426 	case RNDIS_MSG_INIT_C:
427 	case RNDIS_MSG_QUERY_C:
428 	case RNDIS_MSG_SET_C:
429 		/* completion msgs */
430 		rndis_filter_receive_response(ndev, net_dev, rndis_msg);
431 		break;
432 
433 	case RNDIS_MSG_INDICATE:
434 		/* notification msgs */
435 		netvsc_linkstatus_callback(ndev, rndis_msg);
436 		break;
437 	default:
		netdev_err(ndev,
			   "unhandled rndis message (type %u len %u)\n",
			   rndis_msg->ndis_msg_type,
			   rndis_msg->msg_len);
442 		return NVSP_STAT_FAIL;
443 	}
444 
445 	return NVSP_STAT_SUCCESS;
446 }
447 
448 static int rndis_filter_query_device(struct rndis_device *dev,
449 				     struct netvsc_device *nvdev,
450 				     u32 oid, void *result, u32 *result_size)
451 {
452 	struct rndis_request *request;
453 	u32 inresult_size = *result_size;
454 	struct rndis_query_request *query;
455 	struct rndis_query_complete *query_complete;
456 	int ret = 0;
457 
458 	if (!result)
459 		return -EINVAL;
460 
461 	*result_size = 0;
462 	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
463 			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
464 	if (!request) {
465 		ret = -ENOMEM;
466 		goto cleanup;
467 	}
468 
469 	/* Setup the rndis query */
470 	query = &request->request_msg.msg.query_req;
471 	query->oid = oid;
472 	query->info_buf_offset = sizeof(struct rndis_query_request);
473 	query->info_buflen = 0;
474 	query->dev_vc_handle = 0;
475 
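	/*
	 * Some OIDs expect the query buffer to carry a pre-initialized NDIS
	 * object header giving the revision and size the caller can accept.
	 */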
476 	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
477 		struct ndis_offload *hwcaps;
478 		u32 nvsp_version = nvdev->nvsp_version;
479 		u8 ndis_rev;
480 		size_t size;
481 
482 		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
483 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
484 			size = NDIS_OFFLOAD_SIZE;
485 		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
486 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
487 			size = NDIS_OFFLOAD_SIZE_6_1;
488 		} else {
489 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
490 			size = NDIS_OFFLOAD_SIZE_6_0;
491 		}
492 
493 		request->request_msg.msg_len += size;
494 		query->info_buflen = size;
495 		hwcaps = (struct ndis_offload *)
496 			((unsigned long)query + query->info_buf_offset);
497 
498 		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
499 		hwcaps->header.revision = ndis_rev;
500 		hwcaps->header.size = size;
501 
502 	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
503 		struct ndis_recv_scale_cap *cap;
504 
505 		request->request_msg.msg_len +=
506 			sizeof(struct ndis_recv_scale_cap);
507 		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
508 		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
509 						     query->info_buf_offset);
510 		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
511 		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
512 		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
513 	}
514 
515 	ret = rndis_filter_send_request(dev, request);
516 	if (ret != 0)
517 		goto cleanup;
518 
519 	wait_for_completion(&request->wait_event);
520 
521 	/* Copy the response back */
522 	query_complete = &request->response_msg.msg.query_complete;
523 
524 	if (query_complete->info_buflen > inresult_size) {
525 		ret = -1;
526 		goto cleanup;
527 	}
528 
529 	memcpy(result,
530 	       (void *)((unsigned long)query_complete +
531 			 query_complete->info_buf_offset),
532 	       query_complete->info_buflen);
533 
534 	*result_size = query_complete->info_buflen;
535 
536 cleanup:
537 	if (request)
538 		put_rndis_request(dev, request);
539 
540 	return ret;
541 }
542 
543 /* Get the hardware offload capabilities */
544 static int
545 rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
546 		   struct ndis_offload *caps)
547 {
548 	u32 caps_len = sizeof(*caps);
549 	int ret;
550 
551 	memset(caps, 0, sizeof(*caps));
552 
553 	ret = rndis_filter_query_device(dev, net_device,
554 					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
555 					caps, &caps_len);
556 	if (ret)
557 		return ret;
558 
559 	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
560 		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
561 			    caps->header.type);
562 		return -EINVAL;
563 	}
564 
565 	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
566 		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
567 			    caps->header.revision);
568 		return -EINVAL;
569 	}
570 
571 	if (caps->header.size > caps_len ||
572 	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
573 		netdev_warn(dev->ndev,
574 			    "invalid NDIS objsize %u, data size %u\n",
575 			    caps->header.size, caps_len);
576 		return -EINVAL;
577 	}
578 
579 	return 0;
580 }
581 
582 static int rndis_filter_query_device_mac(struct rndis_device *dev,
583 					 struct netvsc_device *net_device)
584 {
585 	u32 size = ETH_ALEN;
586 
587 	return rndis_filter_query_device(dev, net_device,
588 				      RNDIS_OID_802_3_PERMANENT_ADDRESS,
589 				      dev->hw_mac_adr, &size);
590 }
591 
592 #define NWADR_STR "NetworkAddress"
593 #define NWADR_STRLEN 14
594 
595 int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
596 				const char *mac)
597 {
598 	struct rndis_device *rdev = nvdev->extension;
599 	struct rndis_request *request;
600 	struct rndis_set_request *set;
601 	struct rndis_config_parameter_info *cpi;
602 	wchar_t *cfg_nwadr, *cfg_mac;
603 	struct rndis_set_complete *set_complete;
604 	char macstr[2*ETH_ALEN+1];
605 	u32 extlen = sizeof(struct rndis_config_parameter_info) +
606 		2*NWADR_STRLEN + 4*ETH_ALEN;
607 	int ret;
608 
609 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
610 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
611 	if (!request)
612 		return -ENOMEM;
613 
614 	set = &request->request_msg.msg.set_req;
615 	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
616 	set->info_buflen = extlen;
617 	set->info_buf_offset = sizeof(struct rndis_set_request);
618 	set->dev_vc_handle = 0;
619 
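	/*
	 * Lay out the config parameter blob: the rndis_config_parameter_info
	 * header, followed by the UTF-16 parameter name ("NetworkAddress")
	 * and then the UTF-16 string form of the MAC address.
	 */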
620 	cpi = (struct rndis_config_parameter_info *)((ulong)set +
621 		set->info_buf_offset);
622 	cpi->parameter_name_offset =
623 		sizeof(struct rndis_config_parameter_info);
624 	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
625 	cpi->parameter_name_length = 2*NWADR_STRLEN;
626 	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
627 	cpi->parameter_value_offset =
628 		cpi->parameter_name_offset + cpi->parameter_name_length;
	/* Multiply by 4 because each MAC byte is displayed as 2 utf16 chars */
630 	cpi->parameter_value_length = 4*ETH_ALEN;
631 
632 	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
633 	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
634 	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
635 			      cfg_nwadr, NWADR_STRLEN);
636 	if (ret < 0)
637 		goto cleanup;
638 	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
639 	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
640 			      cfg_mac, 2*ETH_ALEN);
641 	if (ret < 0)
642 		goto cleanup;
643 
644 	ret = rndis_filter_send_request(rdev, request);
645 	if (ret != 0)
646 		goto cleanup;
647 
648 	wait_for_completion(&request->wait_event);
649 
650 	set_complete = &request->response_msg.msg.set_complete;
651 	if (set_complete->status != RNDIS_STATUS_SUCCESS)
652 		ret = -EIO;
653 
654 cleanup:
655 	put_rndis_request(rdev, request);
656 	return ret;
657 }
658 
659 static int
660 rndis_filter_set_offload_params(struct net_device *ndev,
661 				struct netvsc_device *nvdev,
662 				struct ndis_offload_params *req_offloads)
663 {
664 	struct rndis_device *rdev = nvdev->extension;
665 	struct rndis_request *request;
666 	struct rndis_set_request *set;
667 	struct ndis_offload_params *offload_params;
668 	struct rndis_set_complete *set_complete;
669 	u32 extlen = sizeof(struct ndis_offload_params);
670 	int ret;
671 	u32 vsp_version = nvdev->nvsp_version;
672 
673 	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
674 		extlen = VERSION_4_OFFLOAD_SIZE;
675 		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
676 		 * UDP checksum offload.
677 		 */
678 		req_offloads->udp_ip_v4_csum = 0;
679 		req_offloads->udp_ip_v6_csum = 0;
680 	}
681 
682 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
683 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
684 	if (!request)
685 		return -ENOMEM;
686 
687 	set = &request->request_msg.msg.set_req;
688 	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
689 	set->info_buflen = extlen;
690 	set->info_buf_offset = sizeof(struct rndis_set_request);
691 	set->dev_vc_handle = 0;
692 
693 	offload_params = (struct ndis_offload_params *)((ulong)set +
694 				set->info_buf_offset);
695 	*offload_params = *req_offloads;
696 	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
697 	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
698 	offload_params->header.size = extlen;
699 
700 	ret = rndis_filter_send_request(rdev, request);
701 	if (ret != 0)
702 		goto cleanup;
703 
704 	wait_for_completion(&request->wait_event);
705 	set_complete = &request->response_msg.msg.set_complete;
706 	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
		netdev_err(ndev, "Failed to set offload on host side: 0x%x\n",
708 			   set_complete->status);
709 		ret = -EINVAL;
710 	}
711 
712 cleanup:
713 	put_rndis_request(rdev, request);
714 	return ret;
715 }
716 
717 int rndis_filter_set_rss_param(struct rndis_device *rdev,
718 			       const u8 *rss_key)
719 {
720 	struct net_device *ndev = rdev->ndev;
721 	struct rndis_request *request;
722 	struct rndis_set_request *set;
723 	struct rndis_set_complete *set_complete;
724 	u32 extlen = sizeof(struct ndis_recv_scale_param) +
725 		     4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
726 	struct ndis_recv_scale_param *rssp;
727 	u32 *itab;
728 	u8 *keyp;
729 	int i, ret;
730 
731 	request = get_rndis_request(
732 			rdev, RNDIS_MSG_SET,
733 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
734 	if (!request)
735 		return -ENOMEM;
736 
737 	set = &request->request_msg.msg.set_req;
738 	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
739 	set->info_buflen = extlen;
740 	set->info_buf_offset = sizeof(struct rndis_set_request);
741 	set->dev_vc_handle = 0;
742 
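	/*
	 * The RSS parameter blob is laid out as the ndis_recv_scale_param
	 * header, then the indirection table (ITAB_NUM u32 entries), then
	 * the 40-byte Toeplitz hash key.
	 */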
743 	rssp = (struct ndis_recv_scale_param *)(set + 1);
744 	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
745 	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
746 	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
747 	rssp->flag = 0;
748 	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
749 			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
750 			 NDIS_HASH_TCP_IPV6;
751 	rssp->indirect_tabsize = 4*ITAB_NUM;
752 	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
753 	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
754 	rssp->kashkey_offset = rssp->indirect_taboffset +
755 			       rssp->indirect_tabsize;
756 
757 	/* Set indirection table entries */
758 	itab = (u32 *)(rssp + 1);
759 	for (i = 0; i < ITAB_NUM; i++)
760 		itab[i] = rdev->rx_table[i];
761 
	/* Set hash key values */
763 	keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
764 	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
765 
766 	ret = rndis_filter_send_request(rdev, request);
767 	if (ret != 0)
768 		goto cleanup;
769 
770 	wait_for_completion(&request->wait_event);
771 	set_complete = &request->response_msg.msg.set_complete;
772 	if (set_complete->status == RNDIS_STATUS_SUCCESS)
773 		memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
774 	else {
		netdev_err(ndev, "Failed to set RSS parameters: 0x%x\n",
776 			   set_complete->status);
777 		ret = -EINVAL;
778 	}
779 
780 cleanup:
781 	put_rndis_request(rdev, request);
782 	return ret;
783 }
784 
785 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
786 						 struct netvsc_device *net_device)
787 {
788 	u32 size = sizeof(u32);
789 	u32 link_status;
790 
791 	return rndis_filter_query_device(dev, net_device,
792 					 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
793 					 &link_status, &size);
794 }
795 
796 static int rndis_filter_query_link_speed(struct rndis_device *dev,
797 					 struct netvsc_device *net_device)
798 {
799 	u32 size = sizeof(u32);
800 	u32 link_speed;
801 	struct net_device_context *ndc;
802 	int ret;
803 
804 	ret = rndis_filter_query_device(dev, net_device,
805 					RNDIS_OID_GEN_LINK_SPEED,
806 					&link_speed, &size);
807 
808 	if (!ret) {
809 		ndc = netdev_priv(dev->ndev);
810 
811 		/* The link speed reported from host is in 100bps unit, so
812 		 * we convert it to Mbps here.
813 		 */
814 		ndc->speed = link_speed / 10000;
815 	}
816 
817 	return ret;
818 }
819 
820 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
821 					  u32 new_filter)
822 {
823 	struct rndis_request *request;
824 	struct rndis_set_request *set;
825 	int ret;
826 
827 	if (dev->filter == new_filter)
828 		return 0;
829 
830 	request = get_rndis_request(dev, RNDIS_MSG_SET,
831 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
832 			sizeof(u32));
833 	if (!request)
834 		return -ENOMEM;
835 
836 	/* Setup the rndis set */
837 	set = &request->request_msg.msg.set_req;
838 	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
839 	set->info_buflen = sizeof(u32);
840 	set->info_buf_offset = sizeof(struct rndis_set_request);
841 
842 	memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
843 	       &new_filter, sizeof(u32));
844 
845 	ret = rndis_filter_send_request(dev, request);
846 	if (ret == 0) {
847 		wait_for_completion(&request->wait_event);
848 		dev->filter = new_filter;
849 	}
850 
851 	put_rndis_request(dev, request);
852 
853 	return ret;
854 }
855 
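/*
 * Work item that translates the net_device flags into an NDIS packet filter:
 * promiscuous mode overrides everything else; otherwise accept directed
 * frames plus multicast/broadcast as requested.
 */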
856 static void rndis_set_multicast(struct work_struct *w)
857 {
858 	struct rndis_device *rdev
859 		= container_of(w, struct rndis_device, mcast_work);
860 	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
861 	unsigned int flags = rdev->ndev->flags;
862 
863 	if (flags & IFF_PROMISC) {
864 		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
865 	} else {
866 		if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
867 			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
868 		if (flags & IFF_BROADCAST)
869 			filter |= NDIS_PACKET_TYPE_BROADCAST;
870 	}
871 
872 	rndis_filter_set_packet_filter(rdev, filter);
873 }
874 
875 void rndis_filter_update(struct netvsc_device *nvdev)
876 {
877 	struct rndis_device *rdev = nvdev->extension;
878 
879 	schedule_work(&rdev->mcast_work);
880 }
881 
882 static int rndis_filter_init_device(struct rndis_device *dev,
883 				    struct netvsc_device *nvdev)
884 {
885 	struct rndis_request *request;
886 	struct rndis_initialize_request *init;
887 	struct rndis_initialize_complete *init_complete;
888 	u32 status;
889 	int ret;
890 
891 	request = get_rndis_request(dev, RNDIS_MSG_INIT,
892 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
893 	if (!request) {
894 		ret = -ENOMEM;
895 		goto cleanup;
896 	}
897 
	/* Setup the rndis initialize request */
899 	init = &request->request_msg.msg.init_req;
900 	init->major_ver = RNDIS_MAJOR_VERSION;
901 	init->minor_ver = RNDIS_MINOR_VERSION;
902 	init->max_xfer_size = 0x4000;
903 
904 	dev->state = RNDIS_DEV_INITIALIZING;
905 
906 	ret = rndis_filter_send_request(dev, request);
907 	if (ret != 0) {
908 		dev->state = RNDIS_DEV_UNINITIALIZED;
909 		goto cleanup;
910 	}
911 
912 	wait_for_completion(&request->wait_event);
913 
914 	init_complete = &request->response_msg.msg.init_complete;
915 	status = init_complete->status;
916 	if (status == RNDIS_STATUS_SUCCESS) {
917 		dev->state = RNDIS_DEV_INITIALIZED;
918 		nvdev->max_pkt = init_complete->max_pkt_per_msg;
919 		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
920 		ret = 0;
921 	} else {
922 		dev->state = RNDIS_DEV_UNINITIALIZED;
923 		ret = -EINVAL;
924 	}
925 
926 cleanup:
927 	if (request)
928 		put_rndis_request(dev, request);
929 
930 	return ret;
931 }
932 
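/*
 * A channel is idle once its receive-completion ring has been drained
 * (mrc.first == mrc.next) and it has no sends outstanding.
 */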
933 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
934 {
935 	int i;
936 
937 	for (i = 0; i < nvdev->num_chn; i++) {
938 		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
939 
940 		if (nvchan->mrc.first != nvchan->mrc.next)
941 			return false;
942 
943 		if (atomic_read(&nvchan->queue_sends) > 0)
944 			return false;
945 	}
946 
947 	return true;
948 }
949 
950 static void rndis_filter_halt_device(struct netvsc_device *nvdev,
951 				     struct rndis_device *dev)
952 {
953 	struct rndis_request *request;
954 	struct rndis_halt_request *halt;
955 
956 	/* Attempt to do a rndis device halt */
957 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
958 				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
959 	if (!request)
960 		goto cleanup;
961 
	/* Setup the rndis halt request */
963 	halt = &request->request_msg.msg.halt_req;
964 	halt->req_id = atomic_inc_return(&dev->new_req_id);
965 
966 	/* Ignore return since this msg is optional. */
967 	rndis_filter_send_request(dev, request);
968 
969 	dev->state = RNDIS_DEV_UNINITIALIZED;
970 
971 cleanup:
972 	nvdev->destroy = true;
973 
974 	/* Force flag to be ordered before waiting */
975 	wmb();
976 
977 	/* Wait for all send completions */
978 	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
979 
980 	if (request)
981 		put_rndis_request(dev, request);
982 }
983 
984 static int rndis_filter_open_device(struct rndis_device *dev)
985 {
986 	int ret;
987 
988 	if (dev->state != RNDIS_DEV_INITIALIZED)
989 		return 0;
990 
991 	ret = rndis_filter_set_packet_filter(dev,
992 					 NDIS_PACKET_TYPE_BROADCAST |
993 					 NDIS_PACKET_TYPE_ALL_MULTICAST |
994 					 NDIS_PACKET_TYPE_DIRECTED);
995 	if (ret == 0)
996 		dev->state = RNDIS_DEV_DATAINITIALIZED;
997 
998 	return ret;
999 }
1000 
1001 static int rndis_filter_close_device(struct rndis_device *dev)
1002 {
1003 	int ret;
1004 
1005 	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1006 		return 0;
1007 
1008 	/* Make sure rndis_set_multicast doesn't re-enable filter! */
1009 	cancel_work_sync(&dev->mcast_work);
1010 
1011 	ret = rndis_filter_set_packet_filter(dev, 0);
1012 	if (ret == -ENODEV)
1013 		ret = 0;
1014 
1015 	if (ret == 0)
1016 		dev->state = RNDIS_DEV_INITIALIZED;
1017 
1018 	return ret;
1019 }
1020 
1021 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1022 {
1023 	struct net_device *ndev =
1024 		hv_get_drvdata(new_sc->primary_channel->device_obj);
1025 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1026 	struct netvsc_device *nvscdev;
1027 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1028 	struct netvsc_channel *nvchan;
1029 	int ret;
1030 
	/* This is safe because this callback only happens when a
	 * new device is being set up and is waiting on channel_init_wait.
	 */
1034 	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
1035 	if (!nvscdev || chn_index >= nvscdev->num_chn)
1036 		return;
1037 
1038 	nvchan = nvscdev->chan_table + chn_index;
1039 
	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via the NET softirq, not the channel handling.
	 */
1043 	set_channel_read_mode(new_sc, HV_CALL_ISR);
1044 
1045 	/* Set the channel before opening.*/
1046 	nvchan->channel = new_sc;
1047 
1048 	ret = vmbus_open(new_sc, netvsc_ring_bytes,
1049 			 netvsc_ring_bytes, NULL, 0,
1050 			 netvsc_channel_cb, nvchan);
1051 	if (ret == 0)
1052 		napi_enable(&nvchan->napi);
1053 	else
1054 		netdev_notice(ndev, "sub channel open failed: %d\n", ret);
1055 
1056 	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
1057 		wake_up(&nvscdev->subchan_open);
1058 }
1059 
1060 /* Open sub-channels after completing the handling of the device probe.
1061  * This breaks overlap of processing the host message for the
1062  * new primary channel with the initialization of sub-channels.
1063  */
1064 void rndis_set_subchannel(struct work_struct *w)
1065 {
1066 	struct netvsc_device *nvdev
1067 		= container_of(w, struct netvsc_device, subchan_work);
1068 	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1069 	struct net_device_context *ndev_ctx;
1070 	struct rndis_device *rdev;
1071 	struct net_device *ndev;
1072 	struct hv_device *hv_dev;
1073 	int i, ret;
1074 
1075 	if (!rtnl_trylock()) {
1076 		schedule_work(w);
1077 		return;
1078 	}
1079 
1080 	rdev = nvdev->extension;
1081 	if (!rdev)
1082 		goto unlock;	/* device was removed */
1083 
1084 	ndev = rdev->ndev;
1085 	ndev_ctx = netdev_priv(ndev);
1086 	hv_dev = ndev_ctx->device_ctx;
1087 
1088 	memset(init_packet, 0, sizeof(struct nvsp_message));
1089 	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1090 	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1091 	init_packet->msg.v5_msg.subchn_req.num_subchannels =
1092 						nvdev->num_chn - 1;
1093 	trace_nvsp_send(ndev, init_packet);
1094 
1095 	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
1096 			       sizeof(struct nvsp_message),
1097 			       (unsigned long)init_packet,
1098 			       VM_PKT_DATA_INBAND,
1099 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1100 	if (ret) {
1101 		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1102 		goto failed;
1103 	}
1104 
1105 	wait_for_completion(&nvdev->channel_init_wait);
1106 	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1107 		netdev_err(ndev, "sub channel request failed\n");
1108 		goto failed;
1109 	}
1110 
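	/* The host may grant fewer sub-channels than requested, so recompute
	 * the channel count from its response.
	 */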
1111 	nvdev->num_chn = 1 +
1112 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1113 
1114 	/* wait for all sub channels to open */
1115 	wait_event(nvdev->subchan_open,
1116 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1117 
	/* ignore failures from setting RSS parameters, we still have channels */
1119 	rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1120 
1121 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1122 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
1123 
1124 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1125 		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1126 
1127 	netif_device_attach(ndev);
1128 	rtnl_unlock();
1129 	return;
1130 
1131 failed:
1132 	/* fallback to only primary channel */
1133 	for (i = 1; i < nvdev->num_chn; i++)
1134 		netif_napi_del(&nvdev->chan_table[i].napi);
1135 
1136 	nvdev->max_chn = 1;
1137 	nvdev->num_chn = 1;
1138 
1139 	netif_device_attach(ndev);
1140 unlock:
1141 	rtnl_unlock();
1142 }
1143 
1144 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
1145 				   struct netvsc_device *nvdev)
1146 {
1147 	struct net_device *net = rndis_device->ndev;
1148 	struct net_device_context *net_device_ctx = netdev_priv(net);
1149 	struct ndis_offload hwcaps;
1150 	struct ndis_offload_params offloads;
1151 	unsigned int gso_max_size = GSO_MAX_SIZE;
1152 	int ret;
1153 
1154 	/* Find HW offload capabilities */
1155 	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
1156 	if (ret != 0)
1157 		return ret;
1158 
1159 	/* A value of zero means "no change"; now turn on what we want. */
1160 	memset(&offloads, 0, sizeof(struct ndis_offload_params));
1161 
	/* Linux computes the IP header checksum itself, so disable that offload */
1163 	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1164 
1165 	/* Reset previously set hw_features flags */
1166 	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
1167 	net_device_ctx->tx_checksum_mask = 0;
1168 
1169 	/* Compute tx offload settings based on hw capabilities */
1170 	net->hw_features |= NETIF_F_RXCSUM;
1171 
1172 	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1173 		/* Can checksum TCP */
1174 		net->hw_features |= NETIF_F_IP_CSUM;
1175 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1176 
1177 		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1178 
1179 		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1180 			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1181 			net->hw_features |= NETIF_F_TSO;
1182 
1183 			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1184 				gso_max_size = hwcaps.lsov2.ip4_maxsz;
1185 		}
1186 
1187 		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1188 			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1189 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1190 		}
1191 	}
1192 
1193 	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1194 		net->hw_features |= NETIF_F_IPV6_CSUM;
1195 
1196 		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1197 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1198 
1199 		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1200 		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1201 			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1202 			net->hw_features |= NETIF_F_TSO6;
1203 
1204 			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1205 				gso_max_size = hwcaps.lsov2.ip6_maxsz;
1206 		}
1207 
1208 		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1209 			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1210 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1211 		}
1212 	}
1213 
1214 	/* In case some hw_features disappeared we need to remove them from
1215 	 * net->features list as they're no longer supported.
1216 	 */
1217 	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
1218 
1219 	netif_set_gso_max_size(net, gso_max_size);
1220 
1221 	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
1222 
1223 	return ret;
1224 }
1225 
1226 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1227 				      struct netvsc_device_info *device_info)
1228 {
1229 	struct net_device *net = hv_get_drvdata(dev);
1230 	struct netvsc_device *net_device;
1231 	struct rndis_device *rndis_device;
1232 	struct ndis_recv_scale_cap rsscap;
1233 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1234 	u32 mtu, size;
1235 	u32 num_possible_rss_qs;
1236 	int i, ret;
1237 
1238 	rndis_device = get_rndis_device();
1239 	if (!rndis_device)
1240 		return ERR_PTR(-ENODEV);
1241 
	/* Let the inner driver handle this first to create the netvsc channel.
	 * NOTE! Once the channel is created, we may get a receive callback
	 * (rndis_filter_receive()) before this call is completed.
	 */
1246 	net_device = netvsc_device_add(dev, device_info);
1247 	if (IS_ERR(net_device)) {
1248 		kfree(rndis_device);
1249 		return net_device;
1250 	}
1251 
1252 	/* Initialize the rndis device */
1253 	net_device->max_chn = 1;
1254 	net_device->num_chn = 1;
1255 
1256 	net_device->extension = rndis_device;
1257 	rndis_device->ndev = net;
1258 
1259 	/* Send the rndis initialization message */
1260 	ret = rndis_filter_init_device(rndis_device, net_device);
1261 	if (ret != 0)
1262 		goto err_dev_remv;
1263 
1264 	/* Get the MTU from the host */
1265 	size = sizeof(u32);
1266 	ret = rndis_filter_query_device(rndis_device, net_device,
1267 					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1268 					&mtu, &size);
1269 	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1270 		net->mtu = mtu;
1271 
1272 	/* Get the mac address */
1273 	ret = rndis_filter_query_device_mac(rndis_device, net_device);
1274 	if (ret != 0)
1275 		goto err_dev_remv;
1276 
1277 	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1278 
1279 	/* Query and set hardware capabilities */
1280 	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
1281 	if (ret != 0)
1282 		goto err_dev_remv;
1283 
1284 	rndis_filter_query_device_link_status(rndis_device, net_device);
1285 
1286 	netdev_dbg(net, "Device MAC %pM link state %s\n",
1287 		   rndis_device->hw_mac_adr,
1288 		   rndis_device->link_state ? "down" : "up");
1289 
1290 	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1291 		return net_device;
1292 
1293 	rndis_filter_query_link_speed(rndis_device, net_device);
1294 
1295 	/* vRSS setup */
1296 	memset(&rsscap, 0, rsscap_size);
1297 	ret = rndis_filter_query_device(rndis_device, net_device,
1298 					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1299 					&rsscap, &rsscap_size);
1300 	if (ret || rsscap.num_recv_que < 2)
1301 		goto out;
1302 
1303 	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
1304 	num_possible_rss_qs = min_t(u32, num_online_cpus(),
1305 				    rsscap.num_recv_que);
1306 
1307 	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1308 
1309 	/* We will use the given number of channels if available. */
1310 	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1311 
1312 	for (i = 0; i < ITAB_NUM; i++)
1313 		rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
1314 						i, net_device->num_chn);
1315 
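	/* The primary channel is already open; each sub-channel open bumps
	 * open_chn in netvsc_sc_open() until it reaches num_chn.
	 */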
1316 	atomic_set(&net_device->open_chn, 1);
1317 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1318 
1319 	for (i = 1; i < net_device->num_chn; i++) {
1320 		ret = netvsc_alloc_recv_comp_ring(net_device, i);
1321 		if (ret) {
1322 			while (--i != 0)
1323 				vfree(net_device->chan_table[i].mrc.slots);
1324 			goto out;
1325 		}
1326 	}
1327 
1328 	for (i = 1; i < net_device->num_chn; i++)
1329 		netif_napi_add(net, &net_device->chan_table[i].napi,
1330 			       netvsc_poll, NAPI_POLL_WEIGHT);
1331 
1332 	if (net_device->num_chn > 1)
1333 		schedule_work(&net_device->subchan_work);
1334 
1335 out:
1336 	/* if unavailable, just proceed with one queue */
1337 	if (ret) {
1338 		net_device->max_chn = 1;
1339 		net_device->num_chn = 1;
1340 	}
1341 
1342 	/* No sub channels, device is ready */
1343 	if (net_device->num_chn == 1)
1344 		netif_device_attach(net);
1345 
1346 	return net_device;
1347 
1348 err_dev_remv:
1349 	rndis_filter_device_remove(dev, net_device);
1350 	return ERR_PTR(ret);
1351 }
1352 
1353 void rndis_filter_device_remove(struct hv_device *dev,
1354 				struct netvsc_device *net_dev)
1355 {
1356 	struct rndis_device *rndis_dev = net_dev->extension;
1357 
1358 	/* Halt and release the rndis device */
1359 	rndis_filter_halt_device(net_dev, rndis_dev);
1360 
1361 	net_dev->extension = NULL;
1362 
1363 	netvsc_device_remove(dev);
1364 }
1365 
1366 int rndis_filter_open(struct netvsc_device *nvdev)
1367 {
1368 	if (!nvdev)
1369 		return -EINVAL;
1370 
1371 	return rndis_filter_open_device(nvdev->extension);
1372 }
1373 
1374 int rndis_filter_close(struct netvsc_device *nvdev)
1375 {
1376 	if (!nvdev)
1377 		return -EINVAL;
1378 
1379 	return rndis_filter_close_device(nvdev->extension);
1380 }
1381