/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/nls.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

static void rndis_set_multicast(struct work_struct *w);

#define RNDIS_EXT_LEN PAGE_SIZE
struct rndis_request {
	struct list_head list_ent;
	struct completion  wait_event;

	struct rndis_message response_msg;
	/*
	 * The buffer for extended info after the RNDIS response message. It's
	 * referenced based on the data offset in the RNDIS message. Its size
	 * is enough for current needs, and should be sufficient for the near
	 * future.
	 */
	u8 response_ext[RNDIS_EXT_LEN];

	/* Simplify allocation by having a netvsc packet inline */
	struct hv_netvsc_packet	pkt;

	struct rndis_message request_msg;
	/*
	 * The buffer for the extended info after the RNDIS request message.
	 * It is referenced and sized in a similar way as response_ext.
	 */
	u8 request_ext[RNDIS_EXT_LEN];
};

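/*
 * Default RSS hash key, programmed into the host via
 * rndis_filter_set_rss_param() for Toeplitz hashing.
 */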
static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};

static struct rndis_device *get_rndis_device(void)
{
	struct rndis_device *device;

	device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
	if (!device)
		return NULL;

	spin_lock_init(&device->request_lock);

	INIT_LIST_HEAD(&device->req_list);
	INIT_WORK(&device->mcast_work, rndis_set_multicast);

	device->state = RNDIS_DEV_UNINITIALIZED;

	return device;
}

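/*
 * Allocate an RNDIS control request of the given message type and length,
 * assign it a unique request id and queue it on the device's request list.
 */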
static struct rndis_request *get_rndis_request(struct rndis_device *dev,
					     u32 msg_type,
					     u32 msg_len)
{
	struct rndis_request *request;
	struct rndis_message *rndis_msg;
	struct rndis_set_request *set;
	unsigned long flags;

	request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
	if (!request)
		return NULL;

	init_completion(&request->wait_event);

	rndis_msg = &request->request_msg;
	rndis_msg->ndis_msg_type = msg_type;
	rndis_msg->msg_len = msg_len;

	request->pkt.q_idx = 0;

	/*
	 * Set the request id. This field is always right after the rndis
	 * header for request/response packet types, so we just use the
	 * SetRequest as a template.
	 */
	set = &rndis_msg->msg.set_req;
	set->req_id = atomic_inc_return(&dev->new_req_id);

	/* Add to the request list */
	spin_lock_irqsave(&dev->request_lock, flags);
	list_add_tail(&request->list_ent, &dev->req_list);
	spin_unlock_irqrestore(&dev->request_lock, flags);

	return request;
}

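/* Remove a request from the device's request list and free it. */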
static void put_rndis_request(struct rndis_device *dev,
			    struct rndis_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->request_lock, flags);
	list_del(&req->list_ent);
	spin_unlock_irqrestore(&dev->request_lock, flags);

	kfree(req);
}

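/* Dump the interesting fields of an RNDIS message to the kernel log. */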
static void dump_rndis_message(struct net_device *netdev,
			       const struct rndis_message *rndis_msg)
{
	switch (rndis_msg->ndis_msg_type) {
	case RNDIS_MSG_PACKET:
		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
			   "data offset %u, data len %u, # oob %u, "
			   "oob offset %u, oob len %u, pkt offset %u, "
			   "pkt len %u)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.pkt.data_offset,
			   rndis_msg->msg.pkt.data_len,
			   rndis_msg->msg.pkt.num_oob_data_elements,
			   rndis_msg->msg.pkt.oob_data_offset,
			   rndis_msg->msg.pkt.oob_data_len,
			   rndis_msg->msg.pkt.per_pkt_info_offset,
			   rndis_msg->msg.pkt.per_pkt_info_len);
		break;

	case RNDIS_MSG_INIT_C:
		netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
			"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
			"device flags %d, max xfer size 0x%x, max pkts %u, "
			"pkt aligned %u)\n",
			rndis_msg->msg_len,
			rndis_msg->msg.init_complete.req_id,
			rndis_msg->msg.init_complete.status,
			rndis_msg->msg.init_complete.major_ver,
			rndis_msg->msg.init_complete.minor_ver,
			rndis_msg->msg.init_complete.dev_flags,
			rndis_msg->msg.init_complete.max_xfer_size,
			rndis_msg->msg.init_complete.max_pkt_per_msg,
			rndis_msg->msg.init_complete.pkt_alignment_factor);
		break;

	case RNDIS_MSG_QUERY_C:
		netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
			"(len %u, id 0x%x, status 0x%x, buf len %u, "
			"buf offset %u)\n",
			rndis_msg->msg_len,
			rndis_msg->msg.query_complete.req_id,
			rndis_msg->msg.query_complete.status,
			rndis_msg->msg.query_complete.info_buflen,
			rndis_msg->msg.query_complete.info_buf_offset);
		break;

	case RNDIS_MSG_SET_C:
		netdev_dbg(netdev,
			"RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
			rndis_msg->msg_len,
			rndis_msg->msg.set_complete.req_id,
			rndis_msg->msg.set_complete.status);
		break;

	case RNDIS_MSG_INDICATE:
		netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
			"(len %u, status 0x%x, buf len %u, buf offset %u)\n",
			rndis_msg->msg_len,
			rndis_msg->msg.indicate_status.status,
			rndis_msg->msg.indicate_status.status_buflen,
			rndis_msg->msg.indicate_status.status_buf_offset);
		break;

	default:
		netdev_dbg(netdev, "0x%x (len %u)\n",
			rndis_msg->ndis_msg_type,
			rndis_msg->msg_len);
		break;
	}
}

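/*
 * Send an RNDIS control request to the host. The request message lives
 * inside struct rndis_request, so it may straddle a page boundary and
 * need a second page buffer element.
 */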
static int rndis_filter_send_request(struct rndis_device *dev,
				  struct rndis_request *req)
{
	struct hv_netvsc_packet *packet;
	struct hv_page_buffer page_buf[2];
	struct hv_page_buffer *pb = page_buf;
	int ret;

	/* Setup the packet to send it */
	packet = &req->pkt;

	packet->total_data_buflen = req->request_msg.msg_len;
	packet->page_buf_cnt = 1;

	pb[0].pfn = virt_to_phys(&req->request_msg) >>
					PAGE_SHIFT;
	pb[0].len = req->request_msg.msg_len;
	pb[0].offset =
		(unsigned long)&req->request_msg & (PAGE_SIZE - 1);

	/* Add another page_buf when request_msg crosses a page boundary */
	if (pb[0].offset + pb[0].len > PAGE_SIZE) {
		packet->page_buf_cnt++;
		pb[0].len = PAGE_SIZE -
			pb[0].offset;
		pb[1].pfn = virt_to_phys((void *)&req->request_msg
			+ pb[0].len) >> PAGE_SHIFT;
		pb[1].offset = 0;
		pb[1].len = req->request_msg.msg_len -
			pb[0].len;
	}

	trace_rndis_send(dev->ndev, 0, &req->request_msg);

	rcu_read_lock_bh();
	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
	rcu_read_unlock_bh();

	return ret;
}

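/*
 * Record the link state reported in a RNDIS_OID_GEN_MEDIA_CONNECT_STATUS
 * query completion.
 */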
static void rndis_set_link_state(struct rndis_device *rdev,
				 struct rndis_request *request)
{
	u32 link_status;
	struct rndis_query_complete *query_complete;

	query_complete = &request->response_msg.msg.query_complete;

	if (query_complete->status == RNDIS_STATUS_SUCCESS &&
	    query_complete->info_buflen == sizeof(u32)) {
		memcpy(&link_status, (void *)((unsigned long)query_complete +
		       query_complete->info_buf_offset), sizeof(u32));
		rdev->link_state = link_status != 0;
	}
}

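/*
 * Match a received completion message to an outstanding request by its
 * request id, copy the response into the request and wake up the waiter.
 */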
static void rndis_filter_receive_response(struct net_device *ndev,
					  struct netvsc_device *nvdev,
					  const struct rndis_message *resp)
{
	struct rndis_device *dev = nvdev->extension;
	struct rndis_request *request = NULL;
	bool found = false;
	unsigned long flags;

	/* This should never happen; it means a control message
	 * response was received after the device was removed.
	 */
	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
		netdev_err(ndev,
			   "got rndis message uninitialized\n");
		return;
	}

	spin_lock_irqsave(&dev->request_lock, flags);
	list_for_each_entry(request, &dev->req_list, list_ent) {
		/*
		 * All request/response messages contain the RequestId as
		 * their first field.
		 */
		if (request->request_msg.msg.init_req.req_id
		    == resp->msg.init_complete.req_id) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&dev->request_lock, flags);

	if (found) {
		if (resp->msg_len <=
		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
			memcpy(&request->response_msg, resp,
			       resp->msg_len);
			if (request->request_msg.ndis_msg_type ==
			    RNDIS_MSG_QUERY && request->request_msg.msg.
			    query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
				rndis_set_link_state(dev, request);
		} else {
			netdev_err(ndev,
				"rndis response buffer overflow "
				"detected (size %u max %zu)\n",
				resp->msg_len,
				sizeof(struct rndis_message));

			if (resp->ndis_msg_type ==
			    RNDIS_MSG_RESET_C) {
				/* does not have a request id field */
				request->response_msg.msg.reset_complete.
					status = RNDIS_STATUS_BUFFER_OVERFLOW;
			} else {
				request->response_msg.msg.
				init_complete.status =
					RNDIS_STATUS_BUFFER_OVERFLOW;
			}
		}

		complete(&request->wait_event);
	} else {
		netdev_err(ndev,
			"no rndis request found for this response "
			"(id 0x%x res type 0x%x)\n",
			resp->msg.init_complete.req_id,
			resp->ndis_msg_type);
	}
}

/*
 * Get the Per-Packet-Info with the specified type;
 * return NULL if it is not found.
 */
static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
{
	struct rndis_per_packet_info *ppi;
	int len;

	if (rpkt->per_pkt_info_offset == 0)
		return NULL;

	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
		rpkt->per_pkt_info_offset);
	len = rpkt->per_pkt_info_len;

	while (len > 0) {
		if (ppi->type == type)
			return (void *)((ulong)ppi + ppi->ppi_offset);
		len -= ppi->size;
		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
	}

	return NULL;
}

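/*
 * Strip the RNDIS header and trailer padding from a data packet and hand
 * the payload, plus any per-packet VLAN and checksum info, up to netvsc.
 */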
static int rndis_filter_receive_data(struct net_device *ndev,
				     struct netvsc_device *nvdev,
				     struct vmbus_channel *channel,
				     struct rndis_message *msg,
				     u32 data_buflen)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	const struct ndis_tcp_ip_checksum_info *csum_info;
	const struct ndis_pkt_8021q_info *vlan;
	u32 data_offset;
	void *data;

	/* Remove the rndis header and pass it back up the stack */
	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;

	data_buflen -= data_offset;

	/*
	 * Make sure we got a valid RNDIS message. At this point data_buflen
	 * should be the data packet size plus the trailer padding size.
	 */
	if (unlikely(data_buflen < rndis_pkt->data_len)) {
		netdev_err(ndev, "rndis message buffer "
			   "overflow detected (got %u, min %u)"
			   "...dropping this message!\n",
			   data_buflen, rndis_pkt->data_len);
		return NVSP_STAT_FAIL;
	}

	vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);

	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);

	data = (void *)msg + data_offset;

	/*
	 * Remove the rndis trailer padding from the rndis packet message.
	 * rndis_pkt->data_len tells us the real data length; we only pass
	 * that much of the packet to the stack, without the trailer padding.
	 */
	return netvsc_recv_callback(ndev, nvdev, channel,
				    data, rndis_pkt->data_len,
				    csum_info, vlan);
}

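/*
 * Entry point for all received RNDIS messages: dispatch data packets,
 * control completions and status indications.
 */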
int rndis_filter_receive(struct net_device *ndev,
			 struct netvsc_device *net_dev,
			 struct vmbus_channel *channel,
			 void *data, u32 buflen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct rndis_message *rndis_msg = data;

	if (netif_msg_rx_status(net_device_ctx))
		dump_rndis_message(ndev, rndis_msg);

	switch (rndis_msg->ndis_msg_type) {
	case RNDIS_MSG_PACKET:
		return rndis_filter_receive_data(ndev, net_dev, channel,
						 rndis_msg, buflen);
	case RNDIS_MSG_INIT_C:
	case RNDIS_MSG_QUERY_C:
	case RNDIS_MSG_SET_C:
		/* completion msgs */
		rndis_filter_receive_response(ndev, net_dev, rndis_msg);
		break;

	case RNDIS_MSG_INDICATE:
		/* notification msgs */
		netvsc_linkstatus_callback(ndev, rndis_msg);
		break;
	default:
		netdev_err(ndev,
			"unhandled rndis message (type %u len %u)\n",
			   rndis_msg->ndis_msg_type,
			   rndis_msg->msg_len);
		return NVSP_STAT_FAIL;
	}

	return NVSP_STAT_SUCCESS;
}

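/*
 * Issue an RNDIS query for @oid and copy the response data into @result.
 * On entry *result_size is the size of the result buffer; on success it is
 * updated to the number of bytes actually returned by the host.
 */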
static int rndis_filter_query_device(struct rndis_device *dev,
				     struct netvsc_device *nvdev,
				     u32 oid, void *result, u32 *result_size)
{
	struct rndis_request *request;
	u32 inresult_size = *result_size;
	struct rndis_query_request *query;
	struct rndis_query_complete *query_complete;
	int ret = 0;

	if (!result)
		return -EINVAL;

	*result_size = 0;
	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis query */
	query = &request->request_msg.msg.query_req;
	query->oid = oid;
	query->info_buf_offset = sizeof(struct rndis_query_request);
	query->info_buflen = 0;
	query->dev_vc_handle = 0;

	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
		struct ndis_offload *hwcaps;
		u32 nvsp_version = nvdev->nvsp_version;
		u8 ndis_rev;
		size_t size;

		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
			size = NDIS_OFFLOAD_SIZE;
		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
			size = NDIS_OFFLOAD_SIZE_6_1;
		} else {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
			size = NDIS_OFFLOAD_SIZE_6_0;
		}

		request->request_msg.msg_len += size;
		query->info_buflen = size;
		hwcaps = (struct ndis_offload *)
			((unsigned long)query + query->info_buf_offset);

		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
		hwcaps->header.revision = ndis_rev;
		hwcaps->header.size = size;

	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
		struct ndis_recv_scale_cap *cap;

		request->request_msg.msg_len +=
			sizeof(struct ndis_recv_scale_cap);
		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
						     query->info_buf_offset);
		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
	}

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	/* Copy the response back */
	query_complete = &request->response_msg.msg.query_complete;

	if (query_complete->info_buflen > inresult_size) {
		ret = -1;
		goto cleanup;
	}

	memcpy(result,
	       (void *)((unsigned long)query_complete +
			 query_complete->info_buf_offset),
	       query_complete->info_buflen);

	*result_size = query_complete->info_buflen;

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}

/* Get the hardware offload capabilities */
static int
rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
		   struct ndis_offload *caps)
{
	u32 caps_len = sizeof(*caps);
	int ret;

	memset(caps, 0, sizeof(*caps));

	ret = rndis_filter_query_device(dev, net_device,
					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
					caps, &caps_len);
	if (ret)
		return ret;

	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
			    caps->header.type);
		return -EINVAL;
	}

	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
			    caps->header.revision);
		return -EINVAL;
	}

	if (caps->header.size > caps_len ||
	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
		netdev_warn(dev->ndev,
			    "invalid NDIS objsize %u, data size %u\n",
			    caps->header.size, caps_len);
		return -EINVAL;
	}

	return 0;
}

static int rndis_filter_query_device_mac(struct rndis_device *dev,
					 struct netvsc_device *net_device)
{
	u32 size = ETH_ALEN;

	return rndis_filter_query_device(dev, net_device,
				      RNDIS_OID_802_3_PERMANENT_ADDRESS,
				      dev->hw_mac_adr, &size);
}

#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14

int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
				const char *mac)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_config_parameter_info *cpi;
	wchar_t *cfg_nwadr, *cfg_mac;
	struct rndis_set_complete *set_complete;
	char macstr[2*ETH_ALEN+1];
	u32 extlen = sizeof(struct rndis_config_parameter_info) +
		2*NWADR_STRLEN + 4*ETH_ALEN;
	int ret;

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	cpi = (struct rndis_config_parameter_info *)((ulong)set +
		set->info_buf_offset);
	cpi->parameter_name_offset =
		sizeof(struct rndis_config_parameter_info);
	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
	cpi->parameter_name_length = 2*NWADR_STRLEN;
	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
	cpi->parameter_value_offset =
		cpi->parameter_name_offset + cpi->parameter_name_length;
	/* Multiply by 4 because each MAC byte is displayed as 2 utf16 chars */
	cpi->parameter_value_length = 4*ETH_ALEN;

	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
			      cfg_nwadr, NWADR_STRLEN);
	if (ret < 0)
		goto cleanup;
	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
			      cfg_mac, 2*ETH_ALEN);
	if (ret < 0)
		goto cleanup;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS)
		ret = -EIO;

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}

static int
rndis_filter_set_offload_params(struct net_device *ndev,
				struct netvsc_device *nvdev,
				struct ndis_offload_params *req_offloads)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct ndis_offload_params *offload_params;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_offload_params);
	int ret;
	u32 vsp_version = nvdev->nvsp_version;

	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
		extlen = VERSION_4_OFFLOAD_SIZE;
		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
		 * UDP checksum offload.
		 */
		req_offloads->udp_ip_v4_csum = 0;
		req_offloads->udp_ip_v6_csum = 0;
	}

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	offload_params = (struct ndis_offload_params *)((ulong)set +
				set->info_buf_offset);
	*offload_params = *req_offloads;
	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
	offload_params->header.size = extlen;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
		netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}

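/*
 * Program the RSS indirection table and hash key on the host. The info
 * buffer of the set request is laid out as:
 *
 *   struct ndis_recv_scale_param  (header, flags, hash info)
 *   u32 indirection_table[ITAB_NUM]
 *   u8  hash_key[NETVSC_HASH_KEYLEN]
 */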
int rndis_filter_set_rss_param(struct rndis_device *rdev,
			       const u8 *rss_key)
{
	struct net_device *ndev = rdev->ndev;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_recv_scale_param) +
		     4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
	struct ndis_recv_scale_param *rssp;
	u32 *itab;
	u8 *keyp;
	int i, ret;

	request = get_rndis_request(
			rdev, RNDIS_MSG_SET,
			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	rssp = (struct ndis_recv_scale_param *)(set + 1);
	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
	rssp->flag = 0;
	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
			 NDIS_HASH_TCP_IPV6;
	rssp->indirect_tabsize = 4*ITAB_NUM;
	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
	rssp->hashkey_offset = rssp->indirect_taboffset +
			       rssp->indirect_tabsize;

	/* Set indirection table entries */
	itab = (u32 *)(rssp + 1);
	for (i = 0; i < ITAB_NUM; i++)
		itab[i] = rdev->rx_table[i];

	/* Set hash key values */
	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status == RNDIS_STATUS_SUCCESS)
		memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
	else {
		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}

static int rndis_filter_query_device_link_status(struct rndis_device *dev,
						 struct netvsc_device *net_device)
{
	u32 size = sizeof(u32);
	u32 link_status;

	return rndis_filter_query_device(dev, net_device,
					 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
					 &link_status, &size);
}

static int rndis_filter_query_link_speed(struct rndis_device *dev,
					 struct netvsc_device *net_device)
{
	u32 size = sizeof(u32);
	u32 link_speed;
	struct net_device_context *ndc;
	int ret;

	ret = rndis_filter_query_device(dev, net_device,
					RNDIS_OID_GEN_LINK_SPEED,
					&link_speed, &size);

	if (!ret) {
		ndc = netdev_priv(dev->ndev);

		/* The link speed reported by the host is in units of 100 bps;
		 * convert it to Mbps here.
		 */
		ndc->speed = link_speed / 10000;
	}

	return ret;
}

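/*
 * Set the host-side receive packet filter (directed, multicast, broadcast,
 * promiscuous, ...). No-op if the filter is unchanged.
 */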
static int rndis_filter_set_packet_filter(struct rndis_device *dev,
					  u32 new_filter)
{
	struct rndis_request *request;
	struct rndis_set_request *set;
	int ret;

	if (dev->filter == new_filter)
		return 0;

	request = get_rndis_request(dev, RNDIS_MSG_SET,
			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
			sizeof(u32));
	if (!request)
		return -ENOMEM;

	/* Setup the rndis set */
	set = &request->request_msg.msg.set_req;
	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
	set->info_buflen = sizeof(u32);
	set->info_buf_offset = sizeof(struct rndis_set_request);

	memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
	       &new_filter, sizeof(u32));

	ret = rndis_filter_send_request(dev, request);
	if (ret == 0) {
		wait_for_completion(&request->wait_event);
		dev->filter = new_filter;
	}

	put_rndis_request(dev, request);

	return ret;
}

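/*
 * Work item: derive the NDIS packet filter from the netdev flags and
 * multicast list, then push it to the host.
 */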
static void rndis_set_multicast(struct work_struct *w)
{
	struct rndis_device *rdev
		= container_of(w, struct rndis_device, mcast_work);
	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
	unsigned int flags = rdev->ndev->flags;

	if (flags & IFF_PROMISC) {
		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
	} else {
		if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
		if (flags & IFF_BROADCAST)
			filter |= NDIS_PACKET_TYPE_BROADCAST;
	}

	rndis_filter_set_packet_filter(rdev, filter);
}

void rndis_filter_update(struct netvsc_device *nvdev)
{
	struct rndis_device *rdev = nvdev->extension;

	schedule_work(&rdev->mcast_work);
}

static int rndis_filter_init_device(struct rndis_device *dev,
				    struct netvsc_device *nvdev)
{
	struct rndis_request *request;
	struct rndis_initialize_request *init;
	struct rndis_initialize_complete *init_complete;
	u32 status;
	int ret;

	request = get_rndis_request(dev, RNDIS_MSG_INIT,
			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Set up the rndis initialize request */
	init = &request->request_msg.msg.init_req;
	init->major_ver = RNDIS_MAJOR_VERSION;
	init->minor_ver = RNDIS_MINOR_VERSION;
	init->max_xfer_size = 0x4000;

	dev->state = RNDIS_DEV_INITIALIZING;

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0) {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		goto cleanup;
	}

	wait_for_completion(&request->wait_event);

	init_complete = &request->response_msg.msg.init_complete;
	status = init_complete->status;
	if (status == RNDIS_STATUS_SUCCESS) {
		dev->state = RNDIS_DEV_INITIALIZED;
		nvdev->max_pkt = init_complete->max_pkt_per_msg;
		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
		ret = 0;
	} else {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		ret = -EINVAL;
	}

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}

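/*
 * The device is idle when every channel has no pending receive
 * completions and no outstanding sends.
 */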
static bool netvsc_device_idle(const struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];

		if (nvchan->mrc.first != nvchan->mrc.next)
			return false;

		if (atomic_read(&nvchan->queue_sends) > 0)
			return false;
	}

	return true;
}

static void rndis_filter_halt_device(struct netvsc_device *nvdev,
				     struct rndis_device *dev)
{
	struct rndis_request *request;
	struct rndis_halt_request *halt;

	/* Attempt to halt the rndis device */
	request = get_rndis_request(dev, RNDIS_MSG_HALT,
				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
	if (!request)
		goto cleanup;

	/* Set up the rndis halt request */
	halt = &request->request_msg.msg.halt_req;
	halt->req_id = atomic_inc_return(&dev->new_req_id);

	/* Ignore the return value since this msg is optional. */
	rndis_filter_send_request(dev, request);

	dev->state = RNDIS_DEV_UNINITIALIZED;

cleanup:
	nvdev->destroy = true;

	/* Force the flag to be ordered before waiting */
	wmb();

	/* Wait for all send completions */
	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));

	if (request)
		put_rndis_request(dev, request);
}

static int rndis_filter_open_device(struct rndis_device *dev)
{
	int ret;

	if (dev->state != RNDIS_DEV_INITIALIZED)
		return 0;

	ret = rndis_filter_set_packet_filter(dev,
					 NDIS_PACKET_TYPE_BROADCAST |
					 NDIS_PACKET_TYPE_ALL_MULTICAST |
					 NDIS_PACKET_TYPE_DIRECTED);
	if (ret == 0)
		dev->state = RNDIS_DEV_DATAINITIALIZED;

	return ret;
}

static int rndis_filter_close_device(struct rndis_device *dev)
{
	int ret;

	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
		return 0;

	/* Make sure rndis_set_multicast doesn't re-enable filter! */
	cancel_work_sync(&dev->mcast_work);

	ret = rndis_filter_set_packet_filter(dev, 0);
	if (ret == -ENODEV)
		ret = 0;

	if (ret == 0)
		dev->state = RNDIS_DEV_INITIALIZED;

	return ret;
}

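/*
 * Callback invoked when the host offers a sub-channel: open it and enable
 * NAPI polling on it.
 */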
static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
	struct net_device *ndev =
		hv_get_drvdata(new_sc->primary_channel->device_obj);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *nvscdev;
	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	int ret;

	/* This is safe because this callback only happens while the new
	 * device is being set up and is waiting on channel_init_wait.
	 */
	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
	if (!nvscdev || chn_index >= nvscdev->num_chn)
		return;

	nvchan = nvscdev->chan_table + chn_index;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via the NET softirq, not in the channel callback.
	 */
	set_channel_read_mode(new_sc, HV_CALL_ISR);

	/* Set the channel before opening. */
	nvchan->channel = new_sc;

	ret = vmbus_open(new_sc, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, nvchan);
	if (ret == 0)
		napi_enable(&nvchan->napi);
	else
		netdev_notice(ndev, "sub channel open failed: %d\n", ret);

	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
		wake_up(&nvscdev->subchan_open);
}

/* Open sub-channels after completing the handling of the device probe.
 * This avoids overlapping the processing of host messages for the new
 * primary channel with the initialization of the sub-channels.
 */
int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
{
	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hv_dev = ndev_ctx->device_ctx;
	struct rndis_device *rdev = nvdev->extension;
	int i, ret;

	ASSERT_RTNL();

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
	init_packet->msg.v5_msg.subchn_req.num_subchannels =
						nvdev->num_chn - 1;
	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
		return ret;
	}

	wait_for_completion(&nvdev->channel_init_wait);
	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "sub channel request failed\n");
		return -EIO;
	}

	nvdev->num_chn = 1 +
		init_packet->msg.v5_msg.subchn_comp.num_subchannels;

	/* wait for all sub channels to open */
	wait_event(nvdev->subchan_open,
		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);

	/* ignore failures from setting rss parameters, we still have channels */
	rndis_filter_set_rss_param(rdev, netvsc_hash_key);

	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		ndev_ctx->tx_table[i] = i % nvdev->num_chn;

	return 0;
}

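/*
 * Query the host's hardware offload capabilities and program both the
 * netdev feature flags and the host-side offload parameters to match.
 */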
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
				   struct netvsc_device *nvdev)
{
	struct net_device *net = rndis_device->ndev;
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct ndis_offload hwcaps;
	struct ndis_offload_params offloads;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	int ret;

	/* Find HW offload capabilities */
	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
	if (ret != 0)
		return ret;

	/* A value of zero means "no change"; now turn on what we want. */
	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	/* Linux does not care about IP checksum; the kernel always computes it */
	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;

	/* Reset previously set hw_features flags */
	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
	net_device_ctx->tx_checksum_mask = 0;

	/* Compute tx offload settings based on hw capabilities */
	net->hw_features |= NETIF_F_RXCSUM;

	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
		/* Can checksum TCP */
		net->hw_features |= NETIF_F_IP_CSUM;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;

		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;

		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO;

			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip4_maxsz;
		}

		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
		}
	}

	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
		net->hw_features |= NETIF_F_IPV6_CSUM;

		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;

		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO6;

			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip6_maxsz;
		}

		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
		}
	}

	/* If some hw_features were cleared above, remove them from
	 * net->features as well, since they are no longer supported.
	 */
	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;

	netif_set_gso_max_size(net, gso_max_size);

	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);

	return ret;
}

static void rndis_get_friendly_name(struct net_device *net,
				    struct rndis_device *rndis_device,
				    struct netvsc_device *net_device)
{
	ucs2_char_t wname[256];
	unsigned long len;
	u8 ifalias[256];
	u32 size;

	size = sizeof(wname);
	if (rndis_filter_query_device(rndis_device, net_device,
				      RNDIS_OID_GEN_FRIENDLY_NAME,
				      wname, &size) != 0)
		return;	/* ignore if host does not support */

	if (size == 0)
		return;	/* name not set */

	/* Convert Windows Unicode string to UTF-8 */
	len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));

	/* ignore the default value from host */
	if (strcmp(ifalias, "Network Adapter") != 0)
		dev_set_alias(net, ifalias, len);
}

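/*
 * Bring up the RNDIS device: create the netvsc channel, initialize RNDIS,
 * query MAC address, MTU and link state, program offloads, and set up
 * vRSS sub-channels when the host supports them.
 */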
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
				      struct netvsc_device_info *device_info)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct netvsc_device *net_device;
	struct rndis_device *rndis_device;
	struct ndis_recv_scale_cap rsscap;
	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
	u32 mtu, size;
	u32 num_possible_rss_qs;
	int i, ret;

	rndis_device = get_rndis_device();
	if (!rndis_device)
		return ERR_PTR(-ENODEV);

	/* Let the inner driver handle this first to create the netvsc channel.
	 * NOTE! Once the channel is created, we may get a receive callback
	 * (rndis_filter_receive()) before this call completes.
	 */
	net_device = netvsc_device_add(dev, device_info);
	if (IS_ERR(net_device)) {
		kfree(rndis_device);
		return net_device;
	}

	/* Initialize the rndis device */
	net_device->max_chn = 1;
	net_device->num_chn = 1;

	net_device->extension = rndis_device;
	rndis_device->ndev = net;

	/* Send the rndis initialization message */
	ret = rndis_filter_init_device(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	/* Get the MTU from the host */
	size = sizeof(u32);
	ret = rndis_filter_query_device(rndis_device, net_device,
					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
					&mtu, &size);
	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
		net->mtu = mtu;

	/* Get the mac address */
	ret = rndis_filter_query_device_mac(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);

	/* Get the friendly name as ifalias */
	if (!net->ifalias)
		rndis_get_friendly_name(net, rndis_device, net_device);

	/* Query and set hardware capabilities */
	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	rndis_filter_query_device_link_status(rndis_device, net_device);

	netdev_dbg(net, "Device MAC %pM link state %s\n",
		   rndis_device->hw_mac_adr,
		   rndis_device->link_state ? "down" : "up");

	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		goto out;

	rndis_filter_query_link_speed(rndis_device, net_device);

	/* vRSS setup */
	memset(&rsscap, 0, rsscap_size);
	ret = rndis_filter_query_device(rndis_device, net_device,
					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
					&rsscap, &rsscap_size);
	if (ret || rsscap.num_recv_que < 2)
		goto out;

	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
	num_possible_rss_qs = min_t(u32, num_online_cpus(),
				    rsscap.num_recv_que);

	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);

	/* We will use the given number of channels if available. */
	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);

	for (i = 0; i < ITAB_NUM; i++)
		rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
						i, net_device->num_chn);

	atomic_set(&net_device->open_chn, 1);
	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

	for (i = 1; i < net_device->num_chn; i++) {
		ret = netvsc_alloc_recv_comp_ring(net_device, i);
		if (ret) {
			while (--i != 0)
				vfree(net_device->chan_table[i].mrc.slots);
			goto out;
		}
	}

	for (i = 1; i < net_device->num_chn; i++)
		netif_napi_add(net, &net_device->chan_table[i].napi,
			       netvsc_poll, NAPI_POLL_WEIGHT);

	return net_device;

out:
	/* setting up multiple channels failed */
	net_device->max_chn = 1;
	net_device->num_chn = 1;
	return net_device;

err_dev_remv:
	rndis_filter_device_remove(dev, net_device);
	return ERR_PTR(ret);
}

void rndis_filter_device_remove(struct hv_device *dev,
				struct netvsc_device *net_dev)
{
	struct rndis_device *rndis_dev = net_dev->extension;

	/* Halt and release the rndis device */
	rndis_filter_halt_device(net_dev, rndis_dev);

	net_dev->extension = NULL;

	netvsc_device_remove(dev);
}

int rndis_filter_open(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	return rndis_filter_open_device(nvdev->extension);
}

int rndis_filter_close(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	return rndis_filter_close_device(nvdev->extension);
}