1 /*******************************************************************************
2 
3   Intel 82599 Virtual Function driver
4   Copyright(c) 1999 - 2015 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, see <http://www.gnu.org/licenses/>.
17 
18   The full GNU General Public License is included in this distribution in
19   the file called "COPYING".
20 
21   Contact Information:
22   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 
25 *******************************************************************************/
26 
27 #include "vf.h"
28 #include "ixgbevf.h"
29 
/* On Hyper-V, to reset, we need to read from this offset
 * in the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
34 #define IXGBE_HV_RESET_OFFSET           0x201
35 
36 /**
37  *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
38  *  @hw: pointer to hardware structure
39  *
40  *  Starts the hardware by filling the bus info structure and media type, clears
41  *  all on chip counters, initializes receive address registers, multicast
42  *  table, VLAN filter table, calls routine to set up link and flow control
43  *  settings, and leaves transmit and receive units disabled and uninitialized
44  **/
45 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
46 {
47 	/* Clear adapter stopped flag */
48 	hw->adapter_stopped = false;
49 
50 	return 0;
51 }
52 
53 /**
54  *  ixgbevf_init_hw_vf - virtual function hardware initialization
55  *  @hw: pointer to hardware structure
56  *
57  *  Initialize the hardware by resetting the hardware and then starting
58  *  the hardware
59  **/
60 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
61 {
62 	s32 status = hw->mac.ops.start_hw(hw);
63 
64 	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
65 
66 	return status;
67 }
68 
69 /**
70  *  ixgbevf_reset_hw_vf - Performs hardware reset
71  *  @hw: pointer to hardware structure
72  *
73  *  Resets the hardware by resetting the transmit and receive units, masks and
74  *  clears all interrupts.
75  **/
76 static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
77 {
78 	struct ixgbe_mbx_info *mbx = &hw->mbx;
79 	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
80 	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
81 	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
82 	u8 *addr = (u8 *)(&msgbuf[1]);
83 
84 	/* Call adapter stop to disable tx/rx and clear interrupts */
85 	hw->mac.ops.stop_adapter(hw);
86 
87 	/* reset the api version */
88 	hw->api_version = ixgbe_mbox_api_10;
89 
90 	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
91 	IXGBE_WRITE_FLUSH(hw);
92 
93 	/* we cannot reset while the RSTI / RSTD bits are asserted */
94 	while (!mbx->ops.check_for_rst(hw) && timeout) {
95 		timeout--;
96 		udelay(5);
97 	}
98 
99 	if (!timeout)
100 		return IXGBE_ERR_RESET_FAILED;
101 
102 	/* mailbox timeout can now become active */
103 	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
104 
105 	msgbuf[0] = IXGBE_VF_RESET;
106 	mbx->ops.write_posted(hw, msgbuf, 1);
107 
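	/* give the PF time to process the reset request before reading
	 * back its reply
	 */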
108 	mdelay(10);
109 
	/* set our "perm_addr" based on info provided by the PF; also
	 * set up the mc_filter_type, which is piggybacked on the MAC
	 * address in word 3
	 */
114 	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
115 	if (ret_val)
116 		return ret_val;
117 
118 	/* New versions of the PF may NACK the reset return message
119 	 * to indicate that no MAC address has yet been assigned for
120 	 * the VF.
121 	 */
122 	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
123 	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
124 		return IXGBE_ERR_INVALID_MAC_ADDR;
125 
126 	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
127 		ether_addr_copy(hw->mac.perm_addr, addr);
128 
129 	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
130 
131 	return 0;
132 }
133 
/**
 * ixgbevf_hv_reset_hw_vf - reset via the PCI config space (Hyper-V variant)
 * @hw: pointer to hardware structure
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 */
138 static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
139 {
140 #if IS_ENABLED(CONFIG_PCI_MMCONFIG)
141 	struct ixgbevf_adapter *adapter = hw->back;
142 	int i;
143 
144 	for (i = 0; i < 6; i++)
145 		pci_read_config_byte(adapter->pdev,
146 				     (i + IXGBE_HV_RESET_OFFSET),
147 				     &hw->mac.perm_addr[i]);
148 	return 0;
149 #else
150 	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
151 	return -EOPNOTSUPP;
152 #endif
153 }
154 
155 /**
156  *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
157  *  @hw: pointer to hardware structure
158  *
159  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
160  *  disables transmit and receive units. The adapter_stopped flag is used by
161  *  the shared code and drivers to determine if the adapter is in a stopped
162  *  state and should not touch the hardware.
163  **/
164 static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
165 {
166 	u32 number_of_queues;
167 	u32 reg_val;
168 	u16 i;
169 
170 	/* Set the adapter_stopped flag so other driver functions stop touching
171 	 * the hardware
172 	 */
173 	hw->adapter_stopped = true;
174 
	/* Disable the receive unit by stopping each queue */
176 	number_of_queues = hw->mac.max_rx_queues;
177 	for (i = 0; i < number_of_queues; i++) {
178 		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
179 		if (reg_val & IXGBE_RXDCTL_ENABLE) {
180 			reg_val &= ~IXGBE_RXDCTL_ENABLE;
181 			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
182 		}
183 	}
184 
185 	IXGBE_WRITE_FLUSH(hw);
186 
	/* Clear the interrupt mask to stop interrupts from being generated */
188 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
189 
190 	/* Clear any pending interrupts */
191 	IXGBE_READ_REG(hw, IXGBE_VTEICR);
192 
193 	/* Disable the transmit unit.  Each queue must be disabled. */
194 	number_of_queues = hw->mac.max_tx_queues;
195 	for (i = 0; i < number_of_queues; i++) {
196 		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
197 		if (reg_val & IXGBE_TXDCTL_ENABLE) {
198 			reg_val &= ~IXGBE_TXDCTL_ENABLE;
199 			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
200 		}
201 	}
202 
203 	return 0;
204 }
205 
206 /**
207  *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
208  *  @hw: pointer to hardware structure
209  *  @mc_addr: the multicast address
210  *
 *  Extracts the 12 bits from a multicast address that determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits of
 *  incoming Rx multicast addresses to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL register. The MO field is set during
 *  initialization to mc_filter_type.
217  **/
218 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
219 {
220 	u32 vector = 0;
221 
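	/* For example, with mc_filter_type 0 and an address whose last two
	 * bytes are mc_addr[4] = 0x34 and mc_addr[5] = 0x56, the result is
	 * vector = (0x34 >> 4) | (0x56 << 4) = 0x563.
	 */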
222 	switch (hw->mac.mc_filter_type) {
223 	case 0:   /* use bits [47:36] of the address */
224 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
225 		break;
226 	case 1:   /* use bits [46:35] of the address */
227 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
228 		break;
229 	case 2:   /* use bits [45:34] of the address */
230 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
231 		break;
232 	case 3:   /* use bits [43:32] of the address */
233 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
234 		break;
235 	default:  /* Invalid mc_filter_type */
236 		break;
237 	}
238 
239 	/* vector can only be 12-bits or boundary will be exceeded */
240 	vector &= 0xFFF;
241 	return vector;
242 }
243 
244 /**
245  *  ixgbevf_get_mac_addr_vf - Read device MAC address
246  *  @hw: pointer to the HW structure
247  *  @mac_addr: pointer to storage for retrieved MAC address
248  **/
249 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
250 {
251 	ether_addr_copy(mac_addr, hw->mac.perm_addr);
252 
253 	return 0;
254 }
255 
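/**
 *  ixgbevf_set_uc_addr_vf - Set/clear a MACVLAN filter through the PF
 *  @hw: pointer to hardware structure
 *  @index: index of the entry in the VF's list; zero clears the whole list
 *  @addr: MAC address to add, or NULL when clearing
 **/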
256 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
257 {
258 	struct ixgbe_mbx_info *mbx = &hw->mbx;
259 	u32 msgbuf[3];
260 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
261 	s32 ret_val;
262 
263 	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
269 	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
270 	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
271 	if (addr)
272 		ether_addr_copy(msg_addr, addr);
273 	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
274 
275 	if (!ret_val)
276 		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
277 
278 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
279 
280 	if (!ret_val)
281 		if (msgbuf[0] ==
282 		    (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
283 			ret_val = -ENOMEM;
284 
285 	return ret_val;
286 }
287 
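/**
 * ixgbevf_hv_set_uc_addr_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 */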
288 static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
289 {
290 	return -EOPNOTSUPP;
291 }
292 
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents
 * @num_rx_queues: number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the mailbox API does not
 *          support this operation.
 */
304 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
305 {
306 	int err, i, j;
307 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
308 	u32 *hw_reta = &msgbuf[1];
309 	u32 mask = 0;
310 
	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues, so the PF compresses
	 * 16 RETA entries into each DWORD, giving 2 bits to each entry.
	 */
316 	int dwords = IXGBEVF_82599_RETA_SIZE / 16;
317 
	/* We support RSS querying for 82599 and x540 devices only.
	 * Return an error if the mailbox API doesn't support RETA querying
	 * or if querying is not supported for this device type.
	 */
322 	if (hw->api_version != ixgbe_mbox_api_12 ||
323 	    hw->mac.type >= ixgbe_mac_X550_vf)
324 		return -EOPNOTSUPP;
325 
326 	msgbuf[0] = IXGBE_VF_GET_RETA;
327 
328 	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
329 
330 	if (err)
331 		return err;
332 
333 	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
334 
335 	if (err)
336 		return err;
337 
338 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
339 
340 	/* If the operation has been refused by a PF return -EPERM */
341 	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
342 		return -EPERM;
343 
344 	/* If we didn't get an ACK there must have been
345 	 * some sort of mailbox error so we should treat it
346 	 * as such.
347 	 */
348 	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
349 		return IXGBE_ERR_MBX;
350 
351 	/* ixgbevf doesn't support more than 2 queues at the moment */
352 	if (num_rx_queues > 1)
353 		mask = 0x1;
354 
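	/* Each DWORD returned by the PF packs 16 two-bit RETA entries;
	 * unpack them into one entry per element of the caller's buffer.
	 */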
355 	for (i = 0; i < dwords; i++)
356 		for (j = 0; j < 16; j++)
357 			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
358 
359 	return 0;
360 }
361 
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the mailbox API does not
 *          support this operation.
 */
372 int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
373 {
374 	int err;
375 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
376 
377 	/* We currently support the RSS Random Key retrieval for 82599 and x540
378 	 * devices only.
379 	 *
380 	 * Thus return an error if API doesn't support RSS Random Key retrieval
381 	 * or if the operation is not supported for this device type.
382 	 */
383 	if (hw->api_version != ixgbe_mbox_api_12 ||
384 	    hw->mac.type >= ixgbe_mac_X550_vf)
385 		return -EOPNOTSUPP;
386 
387 	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
388 	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
389 
390 	if (err)
391 		return err;
392 
393 	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
394 
395 	if (err)
396 		return err;
397 
398 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
399 
400 	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
402 		return -EPERM;
403 
404 	/* If we didn't get an ACK there must have been
405 	 * some sort of mailbox error so we should treat it
406 	 * as such.
407 	 */
408 	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
409 		return IXGBE_ERR_MBX;
410 
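	/* the RSS key immediately follows the message header word */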
411 	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
412 
413 	return 0;
414 }
415 
416 /**
417  *  ixgbevf_set_rar_vf - set device MAC address
418  *  @hw: pointer to hardware structure
419  *  @index: Receive address register to write
420  *  @addr: Address to put into receive address register
421  *  @vmdq: Unused in this implementation
422  **/
423 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
424 			      u32 vmdq)
425 {
426 	struct ixgbe_mbx_info *mbx = &hw->mbx;
427 	u32 msgbuf[3];
428 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
429 	s32 ret_val;
430 
431 	memset(msgbuf, 0, sizeof(msgbuf));
432 	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
433 	ether_addr_copy(msg_addr, addr);
434 	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
435 
436 	if (!ret_val)
437 		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
438 
439 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
440 
	/* if NACKed, the address was rejected; fall back to "perm_addr" */
442 	if (!ret_val &&
443 	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
444 		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
445 		return IXGBE_ERR_MBX;
446 	}
447 
448 	return ret_val;
449 }
450 
451 /**
452  *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
453  *  @hw: pointer to hardware structure
454  *  @index: Receive address register to write
455  *  @addr: Address to put into receive address register
456  *  @vmdq: Unused in this implementation
457  *
458  * We don't really allow setting the device MAC address. However,
459  * if the address being set is the permanent MAC address we will
460  * permit that.
461  **/
462 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
463 				 u32 vmdq)
464 {
465 	if (ether_addr_equal(addr, hw->mac.perm_addr))
466 		return 0;
467 
468 	return -EOPNOTSUPP;
469 }
470 
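/**
 *  ixgbevf_write_msg_read_ack - Write a mailbox message and read the reply
 *  @hw: pointer to the HW structure
 *  @msg: message buffer to send
 *  @size: length of the message, in 32-bit words
 *
 *  Posts the message to the PF and reads back the reply to acknowledge it;
 *  the reply itself is discarded.
 **/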
471 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
472 				       u32 *msg, u16 size)
473 {
474 	struct ixgbe_mbx_info *mbx = &hw->mbx;
475 	u32 retmsg[IXGBE_VFMAILBOX_SIZE];
476 	s32 retval = mbx->ops.write_posted(hw, msg, size);
477 
478 	if (!retval)
479 		mbx->ops.read_posted(hw, retmsg, size);
480 }
481 
482 /**
483  *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
484  *  @hw: pointer to the HW structure
485  *  @netdev: pointer to net device structure
486  *
487  *  Updates the Multicast Table Array.
488  **/
489 static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
490 					  struct net_device *netdev)
491 {
492 	struct netdev_hw_addr *ha;
493 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
494 	u16 *vector_list = (u16 *)&msgbuf[1];
495 	u32 cnt, i;
496 
	/* Each entry in the list uses one 16-bit word.  We have 30
	 * 16-bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack them right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and add code to handle more than 30 later.
	 * It would be unusual for a server to request that many
	 * multicast addresses except in large enterprise network
	 * environments.
	 */
505 
506 	cnt = netdev_mc_count(netdev);
507 	if (cnt > 30)
508 		cnt = 30;
509 	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
510 	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
511 
512 	i = 0;
513 	netdev_for_each_mc_addr(ha, netdev) {
514 		if (i == cnt)
515 			break;
516 		if (is_link_local_ether_addr(ha->addr))
517 			continue;
518 
519 		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
520 	}
521 
522 	ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
523 
524 	return 0;
525 }
526 
/**
 * ixgbevf_hv_update_mc_addr_list_vf - stub for the Hyper-V variant
 * @hw: unused
 * @netdev: unused
 *
 * Hyper-V variant - just a stub.
 */
530 static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
531 					     struct net_device *netdev)
532 {
533 	return -EOPNOTSUPP;
534 }
535 
536 /**
537  *  ixgbevf_update_xcast_mode - Update Multicast mode
538  *  @hw: pointer to the HW structure
539  *  @xcast_mode: new multicast mode
540  *
541  *  Updates the Multicast Mode of VF.
542  **/
543 static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
544 {
545 	struct ixgbe_mbx_info *mbx = &hw->mbx;
546 	u32 msgbuf[2];
547 	s32 err;
548 
549 	switch (hw->api_version) {
550 	case ixgbe_mbox_api_12:
551 		break;
552 	default:
553 		return -EOPNOTSUPP;
554 	}
555 
556 	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
557 	msgbuf[1] = xcast_mode;
558 
559 	err = mbx->ops.write_posted(hw, msgbuf, 2);
560 	if (err)
561 		return err;
562 
563 	err = mbx->ops.read_posted(hw, msgbuf, 2);
564 	if (err)
565 		return err;
566 
567 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
568 	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
569 		return -EPERM;
570 
571 	return 0;
572 }
573 
/**
 * ixgbevf_hv_update_xcast_mode - stub for the Hyper-V variant
 * @hw: unused
 * @xcast_mode: unused
 *
 * Hyper-V variant - just a stub.
 */
577 static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
578 {
579 	return -EOPNOTSUPP;
580 }
581 
582 /**
583  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
584  *  @hw: pointer to the HW structure
585  *  @vlan: 12 bit VLAN ID
586  *  @vind: unused by VF drivers
587  *  @vlan_on: if true then set bit, else clear bit
588  **/
589 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
590 			       bool vlan_on)
591 {
592 	struct ixgbe_mbx_info *mbx = &hw->mbx;
593 	u32 msgbuf[2];
594 	s32 err;
595 
596 	msgbuf[0] = IXGBE_VF_SET_VLAN;
597 	msgbuf[1] = vlan;
	/* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
599 	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
600 
601 	err = mbx->ops.write_posted(hw, msgbuf, 2);
602 	if (err)
603 		goto mbx_err;
604 
605 	err = mbx->ops.read_posted(hw, msgbuf, 2);
606 	if (err)
607 		goto mbx_err;
608 
609 	/* remove extra bits from the message */
610 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
611 	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
612 
613 	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
614 		err = IXGBE_ERR_INVALID_ARGUMENT;
615 
616 mbx_err:
617 	return err;
618 }
619 
/**
 * ixgbevf_hv_set_vfta_vf - stub for the Hyper-V variant
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 *
 * Hyper-V variant - just a stub.
 */
623 static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
624 				  bool vlan_on)
625 {
626 	return -EOPNOTSUPP;
627 }
628 
629 /**
630  *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
631  *  @hw: pointer to hardware structure
632  *  @speed: Unused in this implementation
633  *  @autoneg: Unused in this implementation
634  *  @autoneg_wait_to_complete: Unused in this implementation
635  *
636  *  Do nothing and return success.  VF drivers are not allowed to change
637  *  global settings.  Maintained for driver compatibility.
638  **/
639 static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
640 				     ixgbe_link_speed speed, bool autoneg,
641 				     bool autoneg_wait_to_complete)
642 {
643 	return 0;
644 }
645 
646 /**
647  *  ixgbevf_check_mac_link_vf - Get link/speed status
648  *  @hw: pointer to hardware structure
649  *  @speed: pointer to link speed
 *  @link_up: true if link is up, false otherwise
651  *  @autoneg_wait_to_complete: true when waiting for completion is needed
652  *
653  *  Reads the links register to determine if link is up and the current speed
654  **/
655 static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
656 				     ixgbe_link_speed *speed,
657 				     bool *link_up,
658 				     bool autoneg_wait_to_complete)
659 {
660 	struct ixgbe_mbx_info *mbx = &hw->mbx;
661 	struct ixgbe_mac_info *mac = &hw->mac;
662 	s32 ret_val = 0;
663 	u32 links_reg;
664 	u32 in_msg = 0;
665 
	/* If we were hit with a reset, drop the link */
667 	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
668 		mac->get_link_status = true;
669 
670 	if (!mac->get_link_status)
671 		goto out;
672 
673 	/* if link status is down no point in checking to see if pf is up */
674 	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
675 	if (!(links_reg & IXGBE_LINKS_UP))
676 		goto out;
677 
678 	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
679 	 * before the link status is correct
680 	 */
681 	if (mac->type == ixgbe_mac_82599_vf) {
682 		int i;
683 
684 		for (i = 0; i < 5; i++) {
685 			udelay(100);
686 			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
687 
688 			if (!(links_reg & IXGBE_LINKS_UP))
689 				goto out;
690 		}
691 	}
692 
693 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
694 	case IXGBE_LINKS_SPEED_10G_82599:
695 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
696 		break;
697 	case IXGBE_LINKS_SPEED_1G_82599:
698 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
699 		break;
700 	case IXGBE_LINKS_SPEED_100_82599:
701 		*speed = IXGBE_LINK_SPEED_100_FULL;
702 		break;
703 	}
704 
705 	/* if the read failed it could just be a mailbox collision, best wait
706 	 * until we are called again and don't report an error
707 	 */
708 	if (mbx->ops.read(hw, &in_msg, 1))
709 		goto out;
710 
711 	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not a CTS; if it is a NACK we must have lost CTS status */
713 		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
714 			ret_val = -1;
715 		goto out;
716 	}
717 
	/* the PF is talking; if we timed out in the past we reinit */
719 	if (!mbx->timeout) {
720 		ret_val = -1;
721 		goto out;
722 	}
723 
724 	/* if we passed all the tests above then the link is up and we no
725 	 * longer need to check for link
726 	 */
727 	mac->get_link_status = false;
728 
729 out:
730 	*link_up = !mac->get_link_status;
731 	return ret_val;
732 }
733 
/**
 * ixgbevf_hv_check_mac_link_vf - check link on the Hyper-V variant
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
737 static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
738 					ixgbe_link_speed *speed,
739 					bool *link_up,
740 					bool autoneg_wait_to_complete)
741 {
742 	struct ixgbe_mbx_info *mbx = &hw->mbx;
743 	struct ixgbe_mac_info *mac = &hw->mac;
744 	u32 links_reg;
745 
	/* If we were hit with a reset, drop the link */
747 	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
748 		mac->get_link_status = true;
749 
750 	if (!mac->get_link_status)
751 		goto out;
752 
753 	/* if link status is down no point in checking to see if pf is up */
754 	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
755 	if (!(links_reg & IXGBE_LINKS_UP))
756 		goto out;
757 
758 	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
759 	 * before the link status is correct
760 	 */
761 	if (mac->type == ixgbe_mac_82599_vf) {
762 		int i;
763 
764 		for (i = 0; i < 5; i++) {
765 			udelay(100);
766 			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
767 
768 			if (!(links_reg & IXGBE_LINKS_UP))
769 				goto out;
770 		}
771 	}
772 
773 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
774 	case IXGBE_LINKS_SPEED_10G_82599:
775 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
776 		break;
777 	case IXGBE_LINKS_SPEED_1G_82599:
778 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
779 		break;
780 	case IXGBE_LINKS_SPEED_100_82599:
781 		*speed = IXGBE_LINK_SPEED_100_FULL;
782 		break;
783 	}
784 
785 	/* if we passed all the tests above then the link is up and we no
786 	 * longer need to check for link
787 	 */
788 	mac->get_link_status = false;
789 
790 out:
791 	*link_up = !mac->get_link_status;
792 	return 0;
793 }
794 
795 /**
796  *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
797  *  @hw: pointer to the HW structure
798  *  @max_size: value to assign to max frame size
799  **/
800 static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
801 {
802 	u32 msgbuf[2];
803 
804 	msgbuf[0] = IXGBE_VF_SET_LPE;
805 	msgbuf[1] = max_size;
806 	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
807 }
808 
809 /**
810  * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
811  * @hw: pointer to the HW structure
812  * @max_size: value to assign to max frame size
813  * Hyper-V variant.
814  **/
815 static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
816 {
817 	u32 reg;
818 
819 	/* If we are on Hyper-V, we implement this functionality
820 	 * differently.
821 	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* account for the 4-byte CRC and enable the size limit */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
825 	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
826 }
827 
828 /**
829  *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
830  *  @hw: pointer to the HW structure
831  *  @api: integer containing requested API version
832  **/
833 static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
834 {
835 	int err;
836 	u32 msg[3];
837 
838 	/* Negotiate the mailbox API version */
839 	msg[0] = IXGBE_VF_API_NEGOTIATE;
840 	msg[1] = api;
841 	msg[2] = 0;
842 	err = hw->mbx.ops.write_posted(hw, msg, 3);
843 
844 	if (!err)
845 		err = hw->mbx.ops.read_posted(hw, msg, 3);
846 
847 	if (!err) {
848 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
849 
850 		/* Store value and return 0 on success */
851 		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
852 			hw->api_version = api;
853 			return 0;
854 		}
855 
856 		err = IXGBE_ERR_INVALID_ARGUMENT;
857 	}
858 
859 	return err;
860 }
861 
862 /**
863  *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
864  *  @hw: pointer to the HW structure
865  *  @api: integer containing requested API version
866  *  Hyper-V version - only ixgbe_mbox_api_10 supported.
867  **/
868 static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
869 {
870 	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
871 	if (api != ixgbe_mbox_api_10)
872 		return IXGBE_ERR_INVALID_ARGUMENT;
873 
874 	return 0;
875 }
876 
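/**
 *  ixgbevf_get_queues - Fetch the queue configuration from the PF
 *  @hw: pointer to the HW structure
 *  @num_tcs: filled with the number of traffic classes reported by the PF
 *  @default_tc: filled with the default traffic class
 **/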
877 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
878 		       unsigned int *default_tc)
879 {
880 	int err;
881 	u32 msg[5];
882 
883 	/* do nothing if API doesn't support ixgbevf_get_queues */
884 	switch (hw->api_version) {
885 	case ixgbe_mbox_api_11:
886 	case ixgbe_mbox_api_12:
887 		break;
888 	default:
889 		return 0;
890 	}
891 
892 	/* Fetch queue configuration from the PF */
893 	msg[0] = IXGBE_VF_GET_QUEUE;
894 	msg[1] = msg[2] = msg[3] = msg[4] = 0;
895 	err = hw->mbx.ops.write_posted(hw, msg, 5);
896 
897 	if (!err)
898 		err = hw->mbx.ops.read_posted(hw, msg, 5);
899 
900 	if (!err) {
901 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
902 
		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
907 		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
908 			return IXGBE_ERR_MBX;
909 
910 		/* record and validate values from message */
911 		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
912 		if (hw->mac.max_tx_queues == 0 ||
913 		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
914 			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
915 
916 		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
917 		if (hw->mac.max_rx_queues == 0 ||
918 		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
919 			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
920 
921 		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
922 		/* in case of unknown state assume we cannot tag frames */
923 		if (*num_tcs > hw->mac.max_rx_queues)
924 			*num_tcs = 1;
925 
926 		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
927 		/* default to queue 0 on out-of-bounds queue number */
928 		if (*default_tc >= hw->mac.max_tx_queues)
929 			*default_tc = 0;
930 	}
931 
932 	return err;
933 }
934 
935 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
936 	.init_hw		= ixgbevf_init_hw_vf,
937 	.reset_hw		= ixgbevf_reset_hw_vf,
938 	.start_hw		= ixgbevf_start_hw_vf,
939 	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
940 	.stop_adapter		= ixgbevf_stop_hw_vf,
941 	.setup_link		= ixgbevf_setup_mac_link_vf,
942 	.check_link		= ixgbevf_check_mac_link_vf,
943 	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
944 	.set_rar		= ixgbevf_set_rar_vf,
945 	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
946 	.update_xcast_mode	= ixgbevf_update_xcast_mode,
947 	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
948 	.set_vfta		= ixgbevf_set_vfta_vf,
949 	.set_rlpml		= ixgbevf_set_rlpml_vf,
950 };
951 
952 static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
953 	.init_hw		= ixgbevf_init_hw_vf,
954 	.reset_hw		= ixgbevf_hv_reset_hw_vf,
955 	.start_hw		= ixgbevf_start_hw_vf,
956 	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
957 	.stop_adapter		= ixgbevf_stop_hw_vf,
958 	.setup_link		= ixgbevf_setup_mac_link_vf,
959 	.check_link		= ixgbevf_hv_check_mac_link_vf,
960 	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
961 	.set_rar		= ixgbevf_hv_set_rar_vf,
962 	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
963 	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
964 	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
965 	.set_vfta		= ixgbevf_hv_set_vfta_vf,
966 	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
967 };
968 
969 const struct ixgbevf_info ixgbevf_82599_vf_info = {
970 	.mac = ixgbe_mac_82599_vf,
971 	.mac_ops = &ixgbevf_mac_ops,
972 };
973 
974 const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
975 	.mac = ixgbe_mac_82599_vf,
976 	.mac_ops = &ixgbevf_hv_mac_ops,
977 };
978 
979 const struct ixgbevf_info ixgbevf_X540_vf_info = {
980 	.mac = ixgbe_mac_X540_vf,
981 	.mac_ops = &ixgbevf_mac_ops,
982 };
983 
984 const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
985 	.mac = ixgbe_mac_X540_vf,
986 	.mac_ops = &ixgbevf_hv_mac_ops,
987 };
988 
989 const struct ixgbevf_info ixgbevf_X550_vf_info = {
990 	.mac = ixgbe_mac_X550_vf,
991 	.mac_ops = &ixgbevf_mac_ops,
992 };
993 
994 const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
995 	.mac = ixgbe_mac_X550_vf,
996 	.mac_ops = &ixgbevf_hv_mac_ops,
997 };
998 
999 const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1000 	.mac = ixgbe_mac_X550EM_x_vf,
1001 	.mac_ops = &ixgbevf_mac_ops,
1002 };
1003 
1004 const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1005 	.mac = ixgbe_mac_X550EM_x_vf,
1006 	.mac_ops = &ixgbevf_hv_mac_ops,
1007 };
1008