1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2009, Microsoft Corporation.
4  *
5  * Authors:
6  *   Haiyang Zhang <haiyangz@microsoft.com>
7  *   Hank Janssen  <hjanssen@microsoft.com>
8  */
9 #include <linux/ethtool.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/wait.h>
13 #include <linux/highmem.h>
14 #include <linux/slab.h>
15 #include <linux/io.h>
16 #include <linux/if_ether.h>
17 #include <linux/netdevice.h>
18 #include <linux/if_vlan.h>
19 #include <linux/nls.h>
20 #include <linux/vmalloc.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/ucs2_string.h>
23 #include <linux/string.h>
25 
26 #include "hyperv_net.h"
27 #include "netvsc_trace.h"
28 
29 static void rndis_set_multicast(struct work_struct *w);
30 
31 #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
32 struct rndis_request {
33 	struct list_head list_ent;
34 	struct completion  wait_event;
35 
36 	struct rndis_message response_msg;
37 	/*
38 	 * The buffer for extended info after the RNDIS response message. It's
39 	 * referenced based on the data offset in the RNDIS message. Its size
40 	 * is enough for current needs, and should be sufficient for the near
41 	 * future.
42 	 */
43 	u8 response_ext[RNDIS_EXT_LEN];
44 
45 	/* Simplify allocation by having a netvsc packet inline */
46 	struct hv_netvsc_packet	pkt;
47 
48 	struct rndis_message request_msg;
49 	/*
50 	 * The buffer for the extended info after the RNDIS request message.
51 	 * It is referenced and sized in a similar way as response_ext.
52 	 */
53 	u8 request_ext[RNDIS_EXT_LEN];
54 };
55 
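/*
 * Default RSS hash key (NETVSC_HASH_KEYLEN bytes). It is handed to the host
 * as the Toeplitz hash key when no user-supplied key has been configured.
 */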
56 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
57 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
58 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
59 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
60 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
61 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
62 };
63 
64 static struct rndis_device *get_rndis_device(void)
65 {
66 	struct rndis_device *device;
67 
68 	device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
69 	if (!device)
70 		return NULL;
71 
72 	spin_lock_init(&device->request_lock);
73 
74 	INIT_LIST_HEAD(&device->req_list);
75 	INIT_WORK(&device->mcast_work, rndis_set_multicast);
76 
77 	device->state = RNDIS_DEV_UNINITIALIZED;
78 
79 	return device;
80 }
81 
82 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
83 					     u32 msg_type,
84 					     u32 msg_len)
85 {
86 	struct rndis_request *request;
87 	struct rndis_message *rndis_msg;
88 	struct rndis_set_request *set;
89 	unsigned long flags;
90 
91 	request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
92 	if (!request)
93 		return NULL;
94 
95 	init_completion(&request->wait_event);
96 
97 	rndis_msg = &request->request_msg;
98 	rndis_msg->ndis_msg_type = msg_type;
99 	rndis_msg->msg_len = msg_len;
100 
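	/* Control messages always go out on the primary channel (queue 0). */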
101 	request->pkt.q_idx = 0;
102 
103 	/*
104 	 * Set the request id. This field always follows the rndis header for
105 	 * request/response packet types, so we just use the SetRequest layout
106 	 * as a template.
107 	 */
108 	set = &rndis_msg->msg.set_req;
109 	set->req_id = atomic_inc_return(&dev->new_req_id);
110 
111 	/* Add to the request list */
112 	spin_lock_irqsave(&dev->request_lock, flags);
113 	list_add_tail(&request->list_ent, &dev->req_list);
114 	spin_unlock_irqrestore(&dev->request_lock, flags);
115 
116 	return request;
117 }
118 
119 static void put_rndis_request(struct rndis_device *dev,
120 			    struct rndis_request *req)
121 {
122 	unsigned long flags;
123 
124 	spin_lock_irqsave(&dev->request_lock, flags);
125 	list_del(&req->list_ent);
126 	spin_unlock_irqrestore(&dev->request_lock, flags);
127 
128 	kfree(req);
129 }
130 
131 static void dump_rndis_message(struct net_device *netdev,
132 			       const struct rndis_message *rndis_msg,
133 			       const void *data)
134 {
135 	switch (rndis_msg->ndis_msg_type) {
136 	case RNDIS_MSG_PACKET:
137 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
138 			const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
139 			netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
140 				   "data offset %u data len %u, # oob %u, "
141 				   "oob offset %u, oob len %u, pkt offset %u, "
142 				   "pkt len %u)\n",
143 				   rndis_msg->msg_len,
144 				   pkt->data_offset,
145 				   pkt->data_len,
146 				   pkt->num_oob_data_elements,
147 				   pkt->oob_data_offset,
148 				   pkt->oob_data_len,
149 				   pkt->per_pkt_info_offset,
150 				   pkt->per_pkt_info_len);
151 		}
152 		break;
153 
154 	case RNDIS_MSG_INIT_C:
155 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
156 				sizeof(struct rndis_initialize_complete)) {
157 			const struct rndis_initialize_complete *init_complete =
158 				data + RNDIS_HEADER_SIZE;
159 			netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
160 				"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
161 				"device flags %d, max xfer size 0x%x, max pkts %u, "
162 				"pkt aligned %u)\n",
163 				rndis_msg->msg_len,
164 				init_complete->req_id,
165 				init_complete->status,
166 				init_complete->major_ver,
167 				init_complete->minor_ver,
168 				init_complete->dev_flags,
169 				init_complete->max_xfer_size,
170 				init_complete->max_pkt_per_msg,
171 				init_complete->pkt_alignment_factor);
172 		}
173 		break;
174 
175 	case RNDIS_MSG_QUERY_C:
176 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
177 				sizeof(struct rndis_query_complete)) {
178 			const struct rndis_query_complete *query_complete =
179 				data + RNDIS_HEADER_SIZE;
180 			netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
181 				"(len %u, id 0x%x, status 0x%x, buf len %u, "
182 				"buf offset %u)\n",
183 				rndis_msg->msg_len,
184 				query_complete->req_id,
185 				query_complete->status,
186 				query_complete->info_buflen,
187 				query_complete->info_buf_offset);
188 		}
189 		break;
190 
191 	case RNDIS_MSG_SET_C:
192 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
193 			const struct rndis_set_complete *set_complete =
194 				data + RNDIS_HEADER_SIZE;
195 			netdev_dbg(netdev,
196 				"RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
197 				rndis_msg->msg_len,
198 				set_complete->req_id,
199 				set_complete->status);
200 		}
201 		break;
202 
203 	case RNDIS_MSG_INDICATE:
204 		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
205 				sizeof(struct rndis_indicate_status)) {
206 			const struct rndis_indicate_status *indicate_status =
207 				data + RNDIS_HEADER_SIZE;
208 			netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
209 				"(len %u, status 0x%x, buf len %u, buf offset %u)\n",
210 				rndis_msg->msg_len,
211 				indicate_status->status,
212 				indicate_status->status_buflen,
213 				indicate_status->status_buf_offset);
214 		}
215 		break;
216 
217 	default:
218 		netdev_dbg(netdev, "0x%x (len %u)\n",
219 			rndis_msg->ndis_msg_type,
220 			rndis_msg->msg_len);
221 		break;
222 	}
223 }
224 
225 static int rndis_filter_send_request(struct rndis_device *dev,
226 				  struct rndis_request *req)
227 {
228 	struct hv_netvsc_packet *packet;
229 	struct hv_page_buffer page_buf[2];
230 	struct hv_page_buffer *pb = page_buf;
231 	int ret;
232 
233 	/* Setup the packet to send it */
234 	packet = &req->pkt;
235 
236 	packet->total_data_buflen = req->request_msg.msg_len;
237 	packet->page_buf_cnt = 1;
238 
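	/*
	 * Describe the request message as guest physical ranges. It normally
	 * fits in one hypervisor page; the split below handles the case where
	 * it straddles a page boundary.
	 */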
239 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
240 					HV_HYP_PAGE_SHIFT;
241 	pb[0].len = req->request_msg.msg_len;
242 	pb[0].offset = offset_in_hvpage(&req->request_msg);
243 
244 	/* Add one page_buf when request_msg crosses a page boundary */
245 	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
246 		packet->page_buf_cnt++;
247 		pb[0].len = HV_HYP_PAGE_SIZE -
248 			pb[0].offset;
249 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
250 			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
251 		pb[1].offset = 0;
252 		pb[1].len = req->request_msg.msg_len -
253 			pb[0].len;
254 	}
255 
256 	trace_rndis_send(dev->ndev, 0, &req->request_msg);
257 
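	/* netvsc_send() dereferences RCU-protected device state, so hold the
	 * BH-safe RCU read lock across the call.
	 */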
258 	rcu_read_lock_bh();
259 	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
260 	rcu_read_unlock_bh();
261 
262 	return ret;
263 }
264 
265 static void rndis_set_link_state(struct rndis_device *rdev,
266 				 struct rndis_request *request)
267 {
268 	u32 link_status;
269 	struct rndis_query_complete *query_complete;
270 	u32 msg_len = request->response_msg.msg_len;
271 
272 	/* Ensure the packet is big enough to access its fields */
273 	if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
274 		return;
275 
276 	query_complete = &request->response_msg.msg.query_complete;
277 
278 	if (query_complete->status == RNDIS_STATUS_SUCCESS &&
279 	    query_complete->info_buflen >= sizeof(u32) &&
280 	    query_complete->info_buf_offset >= sizeof(*query_complete) &&
281 	    msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
282 	    msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
283 			>= query_complete->info_buflen) {
284 		memcpy(&link_status, (void *)((unsigned long)query_complete +
285 		       query_complete->info_buf_offset), sizeof(u32));
286 		rdev->link_state = link_status != 0;
287 	}
288 }
289 
290 static void rndis_filter_receive_response(struct net_device *ndev,
291 					  struct netvsc_device *nvdev,
292 					  struct rndis_message *resp,
293 					  void *data)
294 {
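	/*
	 * The request id sits at the same offset in every completion message,
	 * so init_complete is used here only as a layout template to locate it.
	 */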
295 	u32 *req_id = &resp->msg.init_complete.req_id;
296 	struct rndis_device *dev = nvdev->extension;
297 	struct rndis_request *request = NULL;
298 	bool found = false;
299 	unsigned long flags;
300 
301 	/* This should never happen: it means a control message
302 	 * response was received after the device was removed.
303 	 */
304 	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
305 		netdev_err(ndev,
306 			   "got rndis message uninitialized\n");
307 		return;
308 	}
309 
310 	/* Ensure the packet is big enough to read req_id. Req_id is the 1st
311 	 * field in any request/response message, so the payload should have at
312 	 * least sizeof(u32) bytes
313 	 */
314 	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
315 		netdev_err(ndev, "rndis msg_len too small: %u\n",
316 			   resp->msg_len);
317 		return;
318 	}
319 
320 	/* Copy the request ID into nvchan->recv_buf */
321 	*req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
322 
323 	spin_lock_irqsave(&dev->request_lock, flags);
324 	list_for_each_entry(request, &dev->req_list, list_ent) {
325 		/*
326 		 * All request/response messages contain the RequestId as the
327 		 * first field
328 		 */
329 		if (request->request_msg.msg.init_req.req_id == *req_id) {
330 			found = true;
331 			break;
332 		}
333 	}
334 	spin_unlock_irqrestore(&dev->request_lock, flags);
335 
336 	if (found) {
337 		if (resp->msg_len <=
338 		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
339 			memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
340 			unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
341 			       data + RNDIS_HEADER_SIZE + sizeof(*req_id),
342 			       resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
343 			       "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
344 			if (request->request_msg.ndis_msg_type ==
345 			    RNDIS_MSG_QUERY && request->request_msg.msg.
346 			    query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
347 				rndis_set_link_state(dev, request);
348 		} else {
349 			netdev_err(ndev,
350 				"rndis response buffer overflow "
351 				"detected (size %u max %zu)\n",
352 				resp->msg_len,
353 				sizeof(struct rndis_message));
354 
355 			if (resp->ndis_msg_type ==
356 			    RNDIS_MSG_RESET_C) {
357 				/* does not have a request id field */
358 				request->response_msg.msg.reset_complete.
359 					status = RNDIS_STATUS_BUFFER_OVERFLOW;
360 			} else {
361 				request->response_msg.msg.
362 				init_complete.status =
363 					RNDIS_STATUS_BUFFER_OVERFLOW;
364 			}
365 		}
366 
367 		netvsc_dma_unmap(((struct net_device_context *)
368 			netdev_priv(ndev))->device_ctx, &request->pkt);
369 		complete(&request->wait_event);
370 	} else {
371 		netdev_err(ndev,
372 			"no rndis request found for this response "
373 			"(id 0x%x res type 0x%x)\n",
374 			*req_id,
375 			resp->ndis_msg_type);
376 	}
377 }
378 
379 /*
380  * Get the Per-Packet-Info with the specified type;
381  * return NULL if not found.
382  */
383 static inline void *rndis_get_ppi(struct net_device *ndev,
384 				  struct rndis_packet *rpkt,
385 				  u32 rpkt_len, u32 type, u8 internal,
386 				  u32 ppi_size, void *data)
387 {
388 	struct rndis_per_packet_info *ppi;
389 	int len;
390 
391 	if (rpkt->per_pkt_info_offset == 0)
392 		return NULL;
393 
394 	/* Validate info_offset and info_len */
395 	if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
396 	    rpkt->per_pkt_info_offset > rpkt_len) {
397 		netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
398 			   rpkt->per_pkt_info_offset);
399 		return NULL;
400 	}
401 
402 	if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
403 	    rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
404 		netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
405 			   rpkt->per_pkt_info_len);
406 		return NULL;
407 	}
408 
409 	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
410 		rpkt->per_pkt_info_offset);
411 	/* Copy the PPIs into nvchan->recv_buf */
412 	memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
413 	len = rpkt->per_pkt_info_len;
414 
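	/* Walk the PPI list; each entry's size field gives the offset of the
	 * next entry.
	 */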
415 	while (len > 0) {
416 		/* Validate ppi_offset and ppi_size */
417 		if (ppi->size > len) {
418 			netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
419 			break;
420 		}
421 
422 		if (ppi->ppi_offset >= ppi->size) {
423 			netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
424 			break;
425 		}
426 
427 		if (ppi->type == type && ppi->internal == internal) {
428 			/* ppi->size should be big enough to hold the returned object. */
429 			if (ppi->size - ppi->ppi_offset < ppi_size) {
430 				netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
431 					   ppi->size, ppi->ppi_offset);
432 				break;
433 			}
434 			return (void *)((ulong)ppi + ppi->ppi_offset);
435 		}
436 		len -= ppi->size;
437 		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
438 	}
439 
440 	return NULL;
441 }
442 
443 static inline
444 void rsc_add_data(struct netvsc_channel *nvchan,
445 		  const struct ndis_pkt_8021q_info *vlan,
446 		  const struct ndis_tcp_ip_checksum_info *csum_info,
447 		  const u32 *hash_info,
448 		  void *data, u32 len)
449 {
450 	u32 cnt = nvchan->rsc.cnt;
451 
452 	if (cnt) {
453 		nvchan->rsc.pktlen += len;
454 	} else {
455 		/* The data/values pointed to by vlan, csum_info and hash_info are
456 		 * shared across the different 'fragments' of the RSC packet; store
457 		 * them into the packet itself.
458 		 */
459 		if (vlan != NULL) {
460 			memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
461 			nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
462 		} else {
463 			nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
464 		}
465 		if (csum_info != NULL) {
466 			memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
467 			nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
468 		} else {
469 			nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
470 		}
471 		nvchan->rsc.pktlen = len;
472 		if (hash_info != NULL) {
473 			nvchan->rsc.hash_info = *hash_info;
474 			nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
475 		} else {
476 			nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
477 		}
478 	}
479 
480 	nvchan->rsc.data[cnt] = data;
481 	nvchan->rsc.len[cnt] = len;
482 	nvchan->rsc.cnt++;
483 }
484 
485 static int rndis_filter_receive_data(struct net_device *ndev,
486 				     struct netvsc_device *nvdev,
487 				     struct netvsc_channel *nvchan,
488 				     struct rndis_message *msg,
489 				     void *data, u32 data_buflen)
490 {
491 	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
492 	const struct ndis_tcp_ip_checksum_info *csum_info;
493 	const struct ndis_pkt_8021q_info *vlan;
494 	const struct rndis_pktinfo_id *pktinfo_id;
495 	const u32 *hash_info;
496 	u32 data_offset, rpkt_len;
497 	bool rsc_more = false;
498 	int ret;
499 
500 	/* Ensure data_buflen is big enough to read header fields */
501 	if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
502 		netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
503 			   data_buflen);
504 		return NVSP_STAT_FAIL;
505 	}
506 
507 	/* Copy the RNDIS packet into nvchan->recv_buf */
508 	memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
509 
510 	/* Validate rndis_pkt offset */
511 	if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
512 		netdev_err(ndev, "invalid rndis packet offset: %u\n",
513 			   rndis_pkt->data_offset);
514 		return NVSP_STAT_FAIL;
515 	}
516 
517 	/* Remove the rndis header and pass it back up the stack */
518 	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
519 
520 	rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
521 	data_buflen -= data_offset;
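	/* rpkt_len bounds the per-packet-info area that follows the RNDIS
	 * header, while data_buflen now covers only the payload starting at
	 * data_offset.
	 */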
522 
523 	/*
524 	 * Make sure we got a valid RNDIS message: the remaining buffer must
525 	 * cover the data packet size plus the trailer padding size
526 	 */
527 	if (unlikely(data_buflen < rndis_pkt->data_len)) {
528 		netdev_err(ndev, "rndis message buffer "
529 			   "overflow detected (got %u, min %u)"
530 			   "...dropping this message!\n",
531 			   data_buflen, rndis_pkt->data_len);
532 		return NVSP_STAT_FAIL;
533 	}
534 
535 	vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
536 			     data);
537 
538 	csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
539 				  sizeof(*csum_info), data);
540 
541 	hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
542 				  sizeof(*hash_info), data);
543 
544 	pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
545 				   sizeof(*pktinfo_id), data);
546 
547 	/* Identify RSC frags, drop erroneous packets */
548 	if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
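		/* A first fragment starts a new RSC run; a continuation
		 * without a preceding first fragment is dropped.
		 */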
549 		if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
550 			nvchan->rsc.cnt = 0;
551 		else if (nvchan->rsc.cnt == 0)
552 			goto drop;
553 
554 		rsc_more = true;
555 
556 		if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
557 			rsc_more = false;
558 
559 		if (rsc_more && nvchan->rsc.is_last)
560 			goto drop;
561 	} else {
562 		nvchan->rsc.cnt = 0;
563 	}
564 
565 	if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
566 		goto drop;
567 
568 	/* Put the data into the per-channel structure and strip the RNDIS
569 	 * trailer padding: rndis_pkt->data_len gives the real data length, so
570 	 * only the data packet is passed up the stack, without the trailer
571 	 * padding.
572 	 */
573 	rsc_add_data(nvchan, vlan, csum_info, hash_info,
574 		     data + data_offset, rndis_pkt->data_len);
575 
576 	if (rsc_more)
577 		return NVSP_STAT_SUCCESS;
578 
579 	ret = netvsc_recv_callback(ndev, nvdev, nvchan);
580 	nvchan->rsc.cnt = 0;
581 
582 	return ret;
583 
584 drop:
585 	return NVSP_STAT_FAIL;
586 }
587 
588 int rndis_filter_receive(struct net_device *ndev,
589 			 struct netvsc_device *net_dev,
590 			 struct netvsc_channel *nvchan,
591 			 void *data, u32 buflen)
592 {
593 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
594 	struct rndis_message *rndis_msg = nvchan->recv_buf;
595 
596 	if (buflen < RNDIS_HEADER_SIZE) {
597 		netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
598 		return NVSP_STAT_FAIL;
599 	}
600 
601 	/* Copy the RNDIS msg header into nvchan->recv_buf */
602 	memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
603 
604 	/* Validate incoming rndis_message packet */
605 	if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
606 	    buflen < rndis_msg->msg_len) {
607 		netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
608 			   buflen, rndis_msg->msg_len);
609 		return NVSP_STAT_FAIL;
610 	}
611 
612 	if (netif_msg_rx_status(net_device_ctx))
613 		dump_rndis_message(ndev, rndis_msg, data);
614 
615 	switch (rndis_msg->ndis_msg_type) {
616 	case RNDIS_MSG_PACKET:
617 		return rndis_filter_receive_data(ndev, net_dev, nvchan,
618 						 rndis_msg, data, buflen);
619 	case RNDIS_MSG_INIT_C:
620 	case RNDIS_MSG_QUERY_C:
621 	case RNDIS_MSG_SET_C:
622 		/* completion msgs */
623 		rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
624 		break;
625 
626 	case RNDIS_MSG_INDICATE:
627 		/* notification msgs */
628 		netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
629 		break;
630 	default:
631 		netdev_err(ndev,
632 			"unhandled rndis message (type %u len %u)\n",
633 			   rndis_msg->ndis_msg_type,
634 			   rndis_msg->msg_len);
635 		return NVSP_STAT_FAIL;
636 	}
637 
638 	return NVSP_STAT_SUCCESS;
639 }
640 
641 static int rndis_filter_query_device(struct rndis_device *dev,
642 				     struct netvsc_device *nvdev,
643 				     u32 oid, void *result, u32 *result_size)
644 {
645 	struct rndis_request *request;
646 	u32 inresult_size = *result_size;
647 	struct rndis_query_request *query;
648 	struct rndis_query_complete *query_complete;
649 	u32 msg_len;
650 	int ret = 0;
651 
652 	if (!result)
653 		return -EINVAL;
654 
655 	*result_size = 0;
656 	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
657 			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
658 	if (!request) {
659 		ret = -ENOMEM;
660 		goto cleanup;
661 	}
662 
663 	/* Setup the rndis query */
664 	query = &request->request_msg.msg.query_req;
665 	query->oid = oid;
666 	query->info_buf_offset = sizeof(struct rndis_query_request);
667 	query->info_buflen = 0;
668 	query->dev_vc_handle = 0;
669 
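	/* Some OIDs require the query's info buffer to carry an NDIS object
	 * header telling the host which revision/size of the result structure
	 * the guest can accept.
	 */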
670 	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
671 		struct ndis_offload *hwcaps;
672 		u32 nvsp_version = nvdev->nvsp_version;
673 		u8 ndis_rev;
674 		size_t size;
675 
676 		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
677 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
678 			size = NDIS_OFFLOAD_SIZE;
679 		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
680 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
681 			size = NDIS_OFFLOAD_SIZE_6_1;
682 		} else {
683 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
684 			size = NDIS_OFFLOAD_SIZE_6_0;
685 		}
686 
687 		request->request_msg.msg_len += size;
688 		query->info_buflen = size;
689 		hwcaps = (struct ndis_offload *)
690 			((unsigned long)query + query->info_buf_offset);
691 
692 		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
693 		hwcaps->header.revision = ndis_rev;
694 		hwcaps->header.size = size;
695 
696 	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
697 		struct ndis_recv_scale_cap *cap;
698 
699 		request->request_msg.msg_len +=
700 			sizeof(struct ndis_recv_scale_cap);
701 		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
702 		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
703 						     query->info_buf_offset);
704 		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
705 		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
706 		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
707 	}
708 
709 	ret = rndis_filter_send_request(dev, request);
710 	if (ret != 0)
711 		goto cleanup;
712 
713 	wait_for_completion(&request->wait_event);
714 
715 	/* Copy the response back */
716 	query_complete = &request->response_msg.msg.query_complete;
717 	msg_len = request->response_msg.msg_len;
718 
719 	/* Ensure the packet is big enough to access its fields */
720 	if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
721 		ret = -1;
722 		goto cleanup;
723 	}
724 
725 	if (query_complete->info_buflen > inresult_size ||
726 	    query_complete->info_buf_offset < sizeof(*query_complete) ||
727 	    msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
728 	    msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
729 			< query_complete->info_buflen) {
730 		ret = -1;
731 		goto cleanup;
732 	}
733 
734 	memcpy(result,
735 	       (void *)((unsigned long)query_complete +
736 			 query_complete->info_buf_offset),
737 	       query_complete->info_buflen);
738 
739 	*result_size = query_complete->info_buflen;
740 
741 cleanup:
742 	if (request)
743 		put_rndis_request(dev, request);
744 
745 	return ret;
746 }
747 
748 /* Get the hardware offload capabilities */
749 static int
750 rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
751 		   struct ndis_offload *caps)
752 {
753 	u32 caps_len = sizeof(*caps);
754 	int ret;
755 
756 	memset(caps, 0, sizeof(*caps));
757 
758 	ret = rndis_filter_query_device(dev, net_device,
759 					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
760 					caps, &caps_len);
761 	if (ret)
762 		return ret;
763 
764 	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
765 		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
766 			    caps->header.type);
767 		return -EINVAL;
768 	}
769 
770 	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
771 		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
772 			    caps->header.revision);
773 		return -EINVAL;
774 	}
775 
776 	if (caps->header.size > caps_len ||
777 	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
778 		netdev_warn(dev->ndev,
779 			    "invalid NDIS objsize %u, data size %u\n",
780 			    caps->header.size, caps_len);
781 		return -EINVAL;
782 	}
783 
784 	return 0;
785 }
786 
787 static int rndis_filter_query_device_mac(struct rndis_device *dev,
788 					 struct netvsc_device *net_device)
789 {
790 	u32 size = ETH_ALEN;
791 
792 	return rndis_filter_query_device(dev, net_device,
793 				      RNDIS_OID_802_3_PERMANENT_ADDRESS,
794 				      dev->hw_mac_adr, &size);
795 }
796 
797 #define NWADR_STR "NetworkAddress"
798 #define NWADR_STRLEN 14
799 
800 int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
801 				const char *mac)
802 {
803 	struct rndis_device *rdev = nvdev->extension;
804 	struct rndis_request *request;
805 	struct rndis_set_request *set;
806 	struct rndis_config_parameter_info *cpi;
807 	wchar_t *cfg_nwadr, *cfg_mac;
808 	struct rndis_set_complete *set_complete;
809 	char macstr[2*ETH_ALEN+1];
810 	u32 extlen = sizeof(struct rndis_config_parameter_info) +
811 		2*NWADR_STRLEN + 4*ETH_ALEN;
812 	int ret;
813 
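	/* The MAC is changed through RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER: a
	 * "NetworkAddress" parameter whose value is the MAC rendered as a
	 * UTF-16 hex string.
	 */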
814 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
815 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
816 	if (!request)
817 		return -ENOMEM;
818 
819 	set = &request->request_msg.msg.set_req;
820 	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
821 	set->info_buflen = extlen;
822 	set->info_buf_offset = sizeof(struct rndis_set_request);
823 	set->dev_vc_handle = 0;
824 
825 	cpi = (struct rndis_config_parameter_info *)((ulong)set +
826 		set->info_buf_offset);
827 	cpi->parameter_name_offset =
828 		sizeof(struct rndis_config_parameter_info);
829 	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
830 	cpi->parameter_name_length = 2*NWADR_STRLEN;
831 	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
832 	cpi->parameter_value_offset =
833 		cpi->parameter_name_offset + cpi->parameter_name_length;
834 	/* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
835 	cpi->parameter_value_length = 4*ETH_ALEN;
836 
837 	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
838 	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
839 	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
840 			      cfg_nwadr, NWADR_STRLEN);
841 	if (ret < 0)
842 		goto cleanup;
843 	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
844 	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
845 			      cfg_mac, 2*ETH_ALEN);
846 	if (ret < 0)
847 		goto cleanup;
848 
849 	ret = rndis_filter_send_request(rdev, request);
850 	if (ret != 0)
851 		goto cleanup;
852 
853 	wait_for_completion(&request->wait_event);
854 
855 	set_complete = &request->response_msg.msg.set_complete;
856 	if (set_complete->status != RNDIS_STATUS_SUCCESS)
857 		ret = -EIO;
858 
859 cleanup:
860 	put_rndis_request(rdev, request);
861 	return ret;
862 }
863 
864 int
865 rndis_filter_set_offload_params(struct net_device *ndev,
866 				struct netvsc_device *nvdev,
867 				struct ndis_offload_params *req_offloads)
868 {
869 	struct rndis_device *rdev = nvdev->extension;
870 	struct rndis_request *request;
871 	struct rndis_set_request *set;
872 	struct ndis_offload_params *offload_params;
873 	struct rndis_set_complete *set_complete;
874 	u32 extlen = sizeof(struct ndis_offload_params);
875 	int ret;
876 	u32 vsp_version = nvdev->nvsp_version;
877 
878 	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
879 		extlen = VERSION_4_OFFLOAD_SIZE;
880 		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
881 		 * UDP checksum offload.
882 		 */
883 		req_offloads->udp_ip_v4_csum = 0;
884 		req_offloads->udp_ip_v6_csum = 0;
885 	}
886 
887 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
888 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
889 	if (!request)
890 		return -ENOMEM;
891 
892 	set = &request->request_msg.msg.set_req;
893 	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
894 	set->info_buflen = extlen;
895 	set->info_buf_offset = sizeof(struct rndis_set_request);
896 	set->dev_vc_handle = 0;
897 
898 	offload_params = (struct ndis_offload_params *)((ulong)set +
899 				set->info_buf_offset);
900 	*offload_params = *req_offloads;
901 	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
902 	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
903 	offload_params->header.size = extlen;
904 
905 	ret = rndis_filter_send_request(rdev, request);
906 	if (ret != 0)
907 		goto cleanup;
908 
909 	wait_for_completion(&request->wait_event);
910 	set_complete = &request->response_msg.msg.set_complete;
911 	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
912 		netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
913 			   set_complete->status);
914 		ret = -EINVAL;
915 	}
916 
917 cleanup:
918 	put_rndis_request(rdev, request);
919 	return ret;
920 }
921 
922 static int rndis_set_rss_param_msg(struct rndis_device *rdev,
923 				   const u8 *rss_key, u16 flag)
924 {
925 	struct net_device *ndev = rdev->ndev;
926 	struct net_device_context *ndc = netdev_priv(ndev);
927 	struct rndis_request *request;
928 	struct rndis_set_request *set;
929 	struct rndis_set_complete *set_complete;
930 	u32 extlen = sizeof(struct ndis_recv_scale_param) +
931 		     4 * ndc->rx_table_sz + NETVSC_HASH_KEYLEN;
932 	struct ndis_recv_scale_param *rssp;
933 	u32 *itab;
934 	u8 *keyp;
935 	int i, ret;
936 
937 	request = get_rndis_request(
938 			rdev, RNDIS_MSG_SET,
939 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
940 	if (!request)
941 		return -ENOMEM;
942 
943 	set = &request->request_msg.msg.set_req;
944 	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
945 	set->info_buflen = extlen;
946 	set->info_buf_offset = sizeof(struct rndis_set_request);
947 	set->dev_vc_handle = 0;
948 
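	/* The RSS parameter blob layout: ndis_recv_scale_param header,
	 * followed by the indirection table (4 bytes per entry), followed by
	 * the hash key.
	 */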
949 	rssp = (struct ndis_recv_scale_param *)(set + 1);
950 	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
951 	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
952 	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
953 	rssp->flag = flag;
954 	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
955 			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
956 			 NDIS_HASH_TCP_IPV6;
957 	rssp->indirect_tabsize = 4 * ndc->rx_table_sz;
958 	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
959 	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
960 	rssp->hashkey_offset = rssp->indirect_taboffset +
961 			       rssp->indirect_tabsize;
962 
963 	/* Set indirection table entries */
964 	itab = (u32 *)(rssp + 1);
965 	for (i = 0; i < ndc->rx_table_sz; i++)
966 		itab[i] = ndc->rx_table[i];
967 
968 	/* Set hash key values */
969 	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
970 	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
971 
972 	ret = rndis_filter_send_request(rdev, request);
973 	if (ret != 0)
974 		goto cleanup;
975 
976 	wait_for_completion(&request->wait_event);
977 	set_complete = &request->response_msg.msg.set_complete;
978 	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
979 		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
980 		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
981 			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
982 
983 	} else {
984 		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
985 			   set_complete->status);
986 		ret = -EINVAL;
987 	}
988 
989 cleanup:
990 	put_rndis_request(rdev, request);
991 	return ret;
992 }
993 
994 int rndis_filter_set_rss_param(struct rndis_device *rdev,
995 			       const u8 *rss_key)
996 {
997 	/* Disable RSS before change */
998 	rndis_set_rss_param_msg(rdev, rss_key,
999 				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
1000 
1001 	return rndis_set_rss_param_msg(rdev, rss_key, 0);
1002 }
1003 
1004 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
1005 						 struct netvsc_device *net_device)
1006 {
1007 	u32 size = sizeof(u32);
1008 	u32 link_status;
1009 
1010 	return rndis_filter_query_device(dev, net_device,
1011 					 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
1012 					 &link_status, &size);
1013 }
1014 
1015 static int rndis_filter_query_link_speed(struct rndis_device *dev,
1016 					 struct netvsc_device *net_device)
1017 {
1018 	u32 size = sizeof(u32);
1019 	u32 link_speed;
1020 	struct net_device_context *ndc;
1021 	int ret;
1022 
1023 	ret = rndis_filter_query_device(dev, net_device,
1024 					RNDIS_OID_GEN_LINK_SPEED,
1025 					&link_speed, &size);
1026 
1027 	if (!ret) {
1028 		ndc = netdev_priv(dev->ndev);
1029 
1030 		/* The link speed reported from host is in 100bps unit, so
1031 		 * we convert it to Mbps here.
1032 		 */
1033 		ndc->speed = link_speed / 10000;
1034 	}
1035 
1036 	return ret;
1037 }
1038 
1039 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
1040 					  u32 new_filter)
1041 {
1042 	struct rndis_request *request;
1043 	struct rndis_set_request *set;
1044 	int ret;
1045 
1046 	if (dev->filter == new_filter)
1047 		return 0;
1048 
1049 	request = get_rndis_request(dev, RNDIS_MSG_SET,
1050 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
1051 			sizeof(u32));
1052 	if (!request)
1053 		return -ENOMEM;
1054 
1055 	/* Setup the rndis set */
1056 	set = &request->request_msg.msg.set_req;
1057 	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
1058 	set->info_buflen = sizeof(u32);
1059 	set->info_buf_offset = offsetof(typeof(*set), info_buf);
1060 	memcpy(set->info_buf, &new_filter, sizeof(u32));
1061 
1062 	ret = rndis_filter_send_request(dev, request);
1063 	if (ret == 0) {
1064 		wait_for_completion(&request->wait_event);
1065 		dev->filter = new_filter;
1066 	}
1067 
1068 	put_rndis_request(dev, request);
1069 
1070 	return ret;
1071 }
1072 
1073 static void rndis_set_multicast(struct work_struct *w)
1074 {
1075 	struct rndis_device *rdev
1076 		= container_of(w, struct rndis_device, mcast_work);
1077 	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
1078 	unsigned int flags = rdev->ndev->flags;
1079 
1080 	if (flags & IFF_PROMISC) {
1081 		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
1082 	} else {
1083 		if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
1084 			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
1085 		if (flags & IFF_BROADCAST)
1086 			filter |= NDIS_PACKET_TYPE_BROADCAST;
1087 	}
1088 
1089 	rndis_filter_set_packet_filter(rdev, filter);
1090 }
1091 
1092 void rndis_filter_update(struct netvsc_device *nvdev)
1093 {
1094 	struct rndis_device *rdev = nvdev->extension;
1095 
1096 	schedule_work(&rdev->mcast_work);
1097 }
1098 
1099 static int rndis_filter_init_device(struct rndis_device *dev,
1100 				    struct netvsc_device *nvdev)
1101 {
1102 	struct rndis_request *request;
1103 	struct rndis_initialize_request *init;
1104 	struct rndis_initialize_complete *init_complete;
1105 	u32 status;
1106 	int ret;
1107 
1108 	request = get_rndis_request(dev, RNDIS_MSG_INIT,
1109 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
1110 	if (!request) {
1111 		ret = -ENOMEM;
1112 		goto cleanup;
1113 	}
1114 
1115 	/* Setup the rndis initialize request */
1116 	init = &request->request_msg.msg.init_req;
1117 	init->major_ver = RNDIS_MAJOR_VERSION;
1118 	init->minor_ver = RNDIS_MINOR_VERSION;
1119 	init->max_xfer_size = 0x4000;
1120 
1121 	dev->state = RNDIS_DEV_INITIALIZING;
1122 
1123 	ret = rndis_filter_send_request(dev, request);
1124 	if (ret != 0) {
1125 		dev->state = RNDIS_DEV_UNINITIALIZED;
1126 		goto cleanup;
1127 	}
1128 
1129 	wait_for_completion(&request->wait_event);
1130 
1131 	init_complete = &request->response_msg.msg.init_complete;
1132 	status = init_complete->status;
1133 	if (status == RNDIS_STATUS_SUCCESS) {
1134 		dev->state = RNDIS_DEV_INITIALIZED;
1135 		nvdev->max_pkt = init_complete->max_pkt_per_msg;
1136 		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
1137 		ret = 0;
1138 	} else {
1139 		dev->state = RNDIS_DEV_UNINITIALIZED;
1140 		ret = -EINVAL;
1141 	}
1142 
1143 cleanup:
1144 	if (request)
1145 		put_rndis_request(dev, request);
1146 
1147 	return ret;
1148 }
1149 
1150 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
1151 {
1152 	int i;
1153 
1154 	for (i = 0; i < nvdev->num_chn; i++) {
1155 		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1156 
1157 		if (nvchan->mrc.first != nvchan->mrc.next)
1158 			return false;
1159 
1160 		if (atomic_read(&nvchan->queue_sends) > 0)
1161 			return false;
1162 	}
1163 
1164 	return true;
1165 }
1166 
1167 static void rndis_filter_halt_device(struct netvsc_device *nvdev,
1168 				     struct rndis_device *dev)
1169 {
1170 	struct rndis_request *request;
1171 	struct rndis_halt_request *halt;
1172 
1173 	/* Attempt to do a rndis device halt */
1174 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
1175 				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
1176 	if (!request)
1177 		goto cleanup;
1178 
1179 	/* Setup the rndis halt request */
1180 	halt = &request->request_msg.msg.halt_req;
1181 	halt->req_id = atomic_inc_return(&dev->new_req_id);
1182 
1183 	/* Ignore return since this msg is optional. */
1184 	rndis_filter_send_request(dev, request);
1185 
1186 	dev->state = RNDIS_DEV_UNINITIALIZED;
1187 
1188 cleanup:
1189 	nvdev->destroy = true;
1190 
1191 	/* Force flag to be ordered before waiting */
1192 	wmb();
1193 
1194 	/* Wait for all send completions */
1195 	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
1196 
1197 	if (request)
1198 		put_rndis_request(dev, request);
1199 }
1200 
1201 static int rndis_filter_open_device(struct rndis_device *dev)
1202 {
1203 	int ret;
1204 
1205 	if (dev->state != RNDIS_DEV_INITIALIZED)
1206 		return 0;
1207 
1208 	ret = rndis_filter_set_packet_filter(dev,
1209 					 NDIS_PACKET_TYPE_BROADCAST |
1210 					 NDIS_PACKET_TYPE_ALL_MULTICAST |
1211 					 NDIS_PACKET_TYPE_DIRECTED);
1212 	if (ret == 0)
1213 		dev->state = RNDIS_DEV_DATAINITIALIZED;
1214 
1215 	return ret;
1216 }
1217 
1218 static int rndis_filter_close_device(struct rndis_device *dev)
1219 {
1220 	int ret;
1221 
1222 	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1223 		return 0;
1224 
1225 	/* Make sure rndis_set_multicast doesn't re-enable filter! */
1226 	cancel_work_sync(&dev->mcast_work);
1227 
1228 	ret = rndis_filter_set_packet_filter(dev, 0);
1229 	if (ret == -ENODEV)
1230 		ret = 0;
1231 
1232 	if (ret == 0)
1233 		dev->state = RNDIS_DEV_INITIALIZED;
1234 
1235 	return ret;
1236 }
1237 
1238 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1239 {
1240 	struct net_device *ndev =
1241 		hv_get_drvdata(new_sc->primary_channel->device_obj);
1242 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1243 	struct netvsc_device *nvscdev;
1244 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1245 	struct netvsc_channel *nvchan;
1246 	int ret;
1247 
1248 	/* This is safe because this callback only happens when
1249 	 * a new device is being set up and waiting on the channel_init_wait.
1250 	 */
1251 	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
1252 	if (!nvscdev || chn_index >= nvscdev->num_chn)
1253 		return;
1254 
1255 	nvchan = nvscdev->chan_table + chn_index;
1256 
1257 	/* Because the device uses NAPI, all the interrupt batching and
1258 	 * control is done via Net softirq, not the channel handling
1259 	 */
1260 	set_channel_read_mode(new_sc, HV_CALL_ISR);
1261 
1262 	/* Set the channel before opening.*/
1263 	nvchan->channel = new_sc;
1264 
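	/* Wire up VMBus request-id bookkeeping and sizing before opening the
	 * channel so completions can be matched back to their requests.
	 */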
1265 	new_sc->next_request_id_callback = vmbus_next_request_id;
1266 	new_sc->request_addr_callback = vmbus_request_addr;
1267 	new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1268 	new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1269 
1270 	ret = vmbus_open(new_sc, netvsc_ring_bytes,
1271 			 netvsc_ring_bytes, NULL, 0,
1272 			 netvsc_channel_cb, nvchan);
1273 	if (ret == 0)
1274 		napi_enable(&nvchan->napi);
1275 	else
1276 		netdev_notice(ndev, "sub channel open failed: %d\n", ret);
1277 
1278 	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
1279 		wake_up(&nvscdev->subchan_open);
1280 }
1281 
1282 /* Open sub-channels after completing the handling of the device probe.
1283  * This breaks overlap of processing the host message for the
1284  * new primary channel with the initialization of sub-channels.
1285  */
1286 int rndis_set_subchannel(struct net_device *ndev,
1287 			 struct netvsc_device *nvdev,
1288 			 struct netvsc_device_info *dev_info)
1289 {
1290 	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1291 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1292 	struct hv_device *hv_dev = ndev_ctx->device_ctx;
1293 	struct rndis_device *rdev = nvdev->extension;
1294 	int i, ret;
1295 
1296 	ASSERT_RTNL();
1297 
1298 	memset(init_packet, 0, sizeof(struct nvsp_message));
1299 	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1300 	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1301 	init_packet->msg.v5_msg.subchn_req.num_subchannels =
1302 						nvdev->num_chn - 1;
1303 	trace_nvsp_send(ndev, init_packet);
1304 
1305 	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
1306 			       sizeof(struct nvsp_message),
1307 			       (unsigned long)init_packet,
1308 			       VM_PKT_DATA_INBAND,
1309 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1310 	if (ret) {
1311 		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1312 		return ret;
1313 	}
1314 
1315 	wait_for_completion(&nvdev->channel_init_wait);
1316 	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1317 		netdev_err(ndev, "sub channel request failed\n");
1318 		return -EIO;
1319 	}
1320 
1321 	/* Check that number of allocated sub channel is within the expected range */
1322 	if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
1323 		netdev_err(ndev, "invalid number of allocated sub channel\n");
1324 		return -EINVAL;
1325 	}
1326 	nvdev->num_chn = 1 +
1327 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1328 
1329 	/* wait for all sub channels to open */
1330 	wait_event(nvdev->subchan_open,
1331 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1332 
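	/* Spread the default transmit queue mapping evenly over the channels
	 * that are now open.
	 */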
1333 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1334 		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1335 
1336 	/* ignore failures from setting rss parameters, still have channels */
1337 	if (dev_info)
1338 		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1339 	else
1340 		rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1341 
1342 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1343 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
1344 
1345 	return 0;
1346 }
1347 
1348 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
1349 				   struct netvsc_device *nvdev)
1350 {
1351 	struct net_device *net = rndis_device->ndev;
1352 	struct net_device_context *net_device_ctx = netdev_priv(net);
1353 	struct ndis_offload hwcaps;
1354 	struct ndis_offload_params offloads;
1355 	unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
1356 	int ret;
1357 
1358 	/* Find HW offload capabilities */
1359 	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
1360 	if (ret != 0)
1361 		return ret;
1362 
1363 	/* A value of zero means "no change"; now turn on what we want. */
1364 	memset(&offloads, 0, sizeof(struct ndis_offload_params));
1365 
1366 	/* Linux does not use IP header checksum offload; the kernel always computes it */
1367 	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1368 
1369 	/* Reset previously set hw_features flags */
1370 	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
1371 	net_device_ctx->tx_checksum_mask = 0;
1372 
1373 	/* Compute tx offload settings based on hw capabilities */
1374 	net->hw_features |= NETIF_F_RXCSUM;
1375 	net->hw_features |= NETIF_F_SG;
1376 	net->hw_features |= NETIF_F_RXHASH;
1377 
1378 	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1379 		/* Can checksum TCP */
1380 		net->hw_features |= NETIF_F_IP_CSUM;
1381 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1382 
1383 		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1384 
1385 		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1386 			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1387 			net->hw_features |= NETIF_F_TSO;
1388 
1389 			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1390 				gso_max_size = hwcaps.lsov2.ip4_maxsz;
1391 		}
1392 
1393 		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1394 			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1395 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1396 		}
1397 	}
1398 
1399 	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1400 		net->hw_features |= NETIF_F_IPV6_CSUM;
1401 
1402 		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1403 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1404 
1405 		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1406 		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1407 			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1408 			net->hw_features |= NETIF_F_TSO6;
1409 
1410 			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1411 				gso_max_size = hwcaps.lsov2.ip6_maxsz;
1412 		}
1413 
1414 		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1415 			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1416 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1417 		}
1418 	}
1419 
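	/* Receive segment coalescing (RSC) is surfaced to the stack as LRO;
	 * only advertise it when the host can coalesce both IPv4 and IPv6.
	 */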
1420 	if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
1421 		net->hw_features |= NETIF_F_LRO;
1422 
1423 		if (net->features & NETIF_F_LRO) {
1424 			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1425 			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1426 		} else {
1427 			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1428 			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1429 		}
1430 	}
1431 
1432 	/* In case some hw_features disappeared we need to remove them from
1433 	 * net->features list as they're no longer supported.
1434 	 */
1435 	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
1436 
1437 	netif_set_tso_max_size(net, gso_max_size);
1438 
1439 	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
1440 
1441 	return ret;
1442 }
1443 
1444 static void rndis_get_friendly_name(struct net_device *net,
1445 				    struct rndis_device *rndis_device,
1446 				    struct netvsc_device *net_device)
1447 {
1448 	ucs2_char_t wname[256];
1449 	unsigned long len;
1450 	u8 ifalias[256];
1451 	u32 size;
1452 
1453 	size = sizeof(wname);
1454 	if (rndis_filter_query_device(rndis_device, net_device,
1455 				      RNDIS_OID_GEN_FRIENDLY_NAME,
1456 				      wname, &size) != 0)
1457 		return;	/* ignore if host does not support */
1458 
1459 	if (size == 0)
1460 		return;	/* name not set */
1461 
1462 	/* Convert Windows Unicode string to UTF-8 */
1463 	len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
1464 
1465 	/* ignore the default value from host */
1466 	if (strcmp(ifalias, "Network Adapter") != 0)
1467 		dev_set_alias(net, ifalias, len);
1468 }
1469 
1470 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1471 				      struct netvsc_device_info *device_info)
1472 {
1473 	struct net_device *net = hv_get_drvdata(dev);
1474 	struct net_device_context *ndc = netdev_priv(net);
1475 	struct netvsc_device *net_device;
1476 	struct rndis_device *rndis_device;
1477 	struct ndis_recv_scale_cap rsscap;
1478 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1479 	u32 mtu, size;
1480 	u32 num_possible_rss_qs;
1481 	int i, ret;
1482 
1483 	rndis_device = get_rndis_device();
1484 	if (!rndis_device)
1485 		return ERR_PTR(-ENODEV);
1486 
1487 	/* Let the inner driver handle this first to create the netvsc channel
1488 	 * NOTE! Once the channel is created, we may get a receive callback
1489 	 * (RndisFilterOnReceive()) before this call is completed
1490 	 */
1491 	net_device = netvsc_device_add(dev, device_info);
1492 	if (IS_ERR(net_device)) {
1493 		kfree(rndis_device);
1494 		return net_device;
1495 	}
1496 
1497 	/* Initialize the rndis device */
1498 	net_device->max_chn = 1;
1499 	net_device->num_chn = 1;
1500 
1501 	net_device->extension = rndis_device;
1502 	rndis_device->ndev = net;
1503 
1504 	/* Send the rndis initialization message */
1505 	ret = rndis_filter_init_device(rndis_device, net_device);
1506 	if (ret != 0)
1507 		goto err_dev_remv;
1508 
1509 	/* Get the MTU from the host */
1510 	size = sizeof(u32);
1511 	ret = rndis_filter_query_device(rndis_device, net_device,
1512 					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1513 					&mtu, &size);
1514 	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1515 		net->mtu = mtu;
1516 
1517 	/* Get the mac address */
1518 	ret = rndis_filter_query_device_mac(rndis_device, net_device);
1519 	if (ret != 0)
1520 		goto err_dev_remv;
1521 
1522 	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1523 
1524 	/* Get the friendly name as ifalias */
1525 	if (!net->ifalias)
1526 		rndis_get_friendly_name(net, rndis_device, net_device);
1527 
1528 	/* Query and set hardware capabilities */
1529 	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
1530 	if (ret != 0)
1531 		goto err_dev_remv;
1532 
1533 	rndis_filter_query_device_link_status(rndis_device, net_device);
1534 
1535 	netdev_dbg(net, "Device MAC %pM link state %s\n",
1536 		   rndis_device->hw_mac_adr,
1537 		   rndis_device->link_state ? "down" : "up");
1538 
1539 	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1540 		goto out;
1541 
1542 	rndis_filter_query_link_speed(rndis_device, net_device);
1543 
1544 	/* vRSS setup */
1545 	memset(&rsscap, 0, rsscap_size);
1546 	ret = rndis_filter_query_device(rndis_device, net_device,
1547 					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1548 					&rsscap, &rsscap_size);
1549 	if (ret || rsscap.num_recv_que < 2)
1550 		goto out;
1551 
1552 	if (rsscap.num_indirect_tabent &&
1553 	    rsscap.num_indirect_tabent <= ITAB_NUM_MAX)
1554 		ndc->rx_table_sz = rsscap.num_indirect_tabent;
1555 	else
1556 		ndc->rx_table_sz = ITAB_NUM;
1557 
1558 	ndc->rx_table = kcalloc(ndc->rx_table_sz, sizeof(u16), GFP_KERNEL);
1559 	if (!ndc->rx_table) {
1560 		ret = -ENOMEM;
1561 		goto err_dev_remv;
1562 	}
1563 
1564 	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
1565 	num_possible_rss_qs = min_t(u32, num_online_cpus(),
1566 				    rsscap.num_recv_que);
1567 
1568 	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1569 
1570 	/* We will use the given number of channels if available. */
1571 	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1572 
1573 	if (!netif_is_rxfh_configured(net)) {
1574 		for (i = 0; i < ndc->rx_table_sz; i++)
1575 			ndc->rx_table[i] = ethtool_rxfh_indir_default(
1576 						i, net_device->num_chn);
1577 	}
1578 
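	/* The primary channel is already open; netvsc_sc_open() accounts for
	 * each sub-channel as the host offers it.
	 */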
1579 	atomic_set(&net_device->open_chn, 1);
1580 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1581 
1582 	for (i = 1; i < net_device->num_chn; i++) {
1583 		ret = netvsc_alloc_recv_comp_ring(net_device, i);
1584 		if (ret) {
1585 			while (--i != 0)
1586 				vfree(net_device->chan_table[i].mrc.slots);
1587 			goto out;
1588 		}
1589 	}
1590 
1591 	for (i = 1; i < net_device->num_chn; i++)
1592 		netif_napi_add(net, &net_device->chan_table[i].napi,
1593 			       netvsc_poll);
1594 
1595 	return net_device;
1596 
1597 out:
1598 	/* setting up multiple channels failed */
1599 	net_device->max_chn = 1;
1600 	net_device->num_chn = 1;
1601 	return net_device;
1602 
1603 err_dev_remv:
1604 	rndis_filter_device_remove(dev, net_device);
1605 	return ERR_PTR(ret);
1606 }
1607 
1608 void rndis_filter_device_remove(struct hv_device *dev,
1609 				struct netvsc_device *net_dev)
1610 {
1611 	struct rndis_device *rndis_dev = net_dev->extension;
1612 	struct net_device *net = hv_get_drvdata(dev);
1613 	struct net_device_context *ndc;
1614 
1615 	ndc = netdev_priv(net);
1616 
1617 	/* Halt and release the rndis device */
1618 	rndis_filter_halt_device(net_dev, rndis_dev);
1619 
1620 	netvsc_device_remove(dev);
1621 
1622 	ndc->rx_table_sz = 0;
1623 	kfree(ndc->rx_table);
1624 	ndc->rx_table = NULL;
1625 }
1626 
1627 int rndis_filter_open(struct netvsc_device *nvdev)
1628 {
1629 	if (!nvdev)
1630 		return -EINVAL;
1631 
1632 	return rndis_filter_open_device(nvdev->extension);
1633 }
1634 
1635 int rndis_filter_close(struct netvsc_device *nvdev)
1636 {
1637 	if (!nvdev)
1638 		return -EINVAL;
1639 
1640 	return rndis_filter_close_device(nvdev->extension);
1641 }
1642