/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "vf.h"
#include "ixgbevf.h"

/* On Hyper-V, to reset, we need to read from this offset
 * from the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET           0x201

static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
					     u32 *retmsg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 retval = mbx->ops.write_posted(hw, msg, size);

	if (retval)
		return retval;

	return mbx->ops.read_posted(hw, retmsg, size);
}
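
/* Callers throughout this file follow the same pattern around this helper
 * (an illustrative sketch only; see e.g. ixgbevf_set_rar_vf() below):
 *
 *	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
 *	...copy any payload into msgbuf[1..]...
 *	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 *					     sizeof(msgbuf) / sizeof(u32));
 *	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 *	if (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))
 *		...the PF refused the request...
 */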

/**
 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by filling the bus info structure and media type, clears
 *  all on chip counters, initializes receive address registers, multicast
 *  table, VLAN filter table, calls routine to set up link and flow control
 *  settings, and leaves transmit and receive units disabled and uninitialized
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}

/**
 *  ixgbevf_init_hw_vf - virtual function hardware initialization
 *  @hw: pointer to hardware structure
 *
 *  Initialize the hardware by resetting the hardware and then starting
 *  the hardware
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
	s32 status = hw->mac.ops.start_hw(hw);

	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

	return status;
}

/**
 *  ixgbevf_reset_hw_vf - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
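
/* Reply layout for IXGBE_VF_RESET as consumed above (noted here for
 * reference): word 0 carries the message type plus ACK/NACK, words 1-2
 * carry the 6-byte permanent MAC address, and word 3
 * (IXGBE_VF_MC_TYPE_WORD) carries the multicast filter type.
 */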

/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to hardware structure
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}

/**
 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}

/**
 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 *  @hw: pointer to hardware structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits from a multicast address that determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits of
 *  incoming Rx multicast addresses to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
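
/* Worked example (illustrative only): for the all-hosts group address
 * 01:00:5e:00:00:01 with mc_filter_type 0, mc_addr[4] is 0x00 and
 * mc_addr[5] is 0x01, so the 12-bit vector is
 * (0x00 >> 4) | (0x01 << 4) = 0x010.
 */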

/**
 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 *  @hw: pointer to the HW structure
 *  @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}

static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	u32 msgbuf[3], msgbuf_chk;
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	msgbuf_chk = msgbuf[0];

	if (addr)
		ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     sizeof(msgbuf) / sizeof(u32));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
			return -ENOMEM;
	}

	return ret_val;
}

static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to the HW structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the mailbox API does not support
 *          this operation.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore the PF will
	 * compress 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if the API doesn't support RETA querying or
	 * querying is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
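
/* Unpacking example (illustrative only): with two Rx queues the mask above
 * is 0x1, so a returned DWORD of 0x44444444 (two-bit entries 0, 1, 0, 1, ...
 * taken from the least significant bits up) expands to the alternating
 * table 0, 1, 0, 1, ... across its 16 entries.
 */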

/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the mailbox API does not support
 *          this operation.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if the API doesn't support RSS Random Key
	 * retrieval or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}

/**
 *  ixgbevf_set_rar_vf - set device MAC address
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     sizeof(msgbuf) / sizeof(u32));

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
		return IXGBE_ERR_MBX;
	}

	return ret_val;
}

/**
 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: Unused in this implementation
 *
 * We don't really allow setting the device MAC address. However,
 * if the address being set is the permanent MAC address we will
 * permit that.
 **/
static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
				 u32 vmdq)
{
	if (ether_addr_equal(addr, hw->mac.perm_addr))
		return 0;

	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word.  We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}

/**
 * ixgbevf_hv_update_mc_addr_list_vf - stub
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_update_xcast_mode - Update Multicast mode
 *  @hw: pointer to the HW structure
 *  @xcast_mode: new multicast mode
 *
 *  Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 sizeof(msgbuf) / sizeof(u32));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}

/**
 * ixgbevf_hv_update_xcast_mode - stub
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 *  @hw: pointer to the HW structure
 *  @vlan: 12 bit VLAN ID
 *  @vind: unused by VF drivers
 *  @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 sizeof(msgbuf) / sizeof(u32));
	if (err)
		goto mbx_err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
	return err;
}

/**
 * ixgbevf_hv_set_vfta_vf - stub
 * @hw: pointer to the HW structure
 * @vlan: 12 bit VLAN ID
 * @vind: unused by VF drivers
 * @vlan_on: if true then set bit, else clear bit
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @speed: Unused in this implementation
 *  @autoneg: Unused in this implementation
 *  @autoneg_wait_to_complete: Unused in this implementation
 *
 *  Do nothing and return success.  VF drivers are not allowed to change
 *  global settings.  Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}

/**
 *  ixgbevf_check_mac_link_vf - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true if link is up, false otherwise
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK, we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}

/**
 * ixgbevf_hv_check_mac_link_vf - check link on Hyper-V
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}

/**
 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 *  @hw: pointer to the HW structure
 *  @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     sizeof(msgbuf) / sizeof(u32));
	if (ret_val)
		return ret_val;
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_MBX;

	return 0;
}

/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* CRC == 4 */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}

/**
 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 *  @hw: pointer to the HW structure
 *  @api: integer containing requested API version
 **/
static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	int err;
	u32 msg[3];

	/* Negotiate the mailbox API version */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;
	msg[2] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
					 sizeof(msg) / sizeof(u32));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* Store value and return 0 on success */
		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
			hw->api_version = api;
			return 0;
		}

		err = IXGBE_ERR_INVALID_ARGUMENT;
	}

	return err;
}
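
/* The caller in ixgbevf_main.c typically walks the known mailbox API
 * versions from newest to oldest and stops at the first one the PF ACKs,
 * so a failure here normally just means "retry with an older version",
 * roughly (illustrative sketch only):
 *
 *	int api[] = { ixgbe_mbox_api_13, ixgbe_mbox_api_12,
 *		      ixgbe_mbox_api_11, ixgbe_mbox_api_10 };
 *	for (idx = 0; idx < ARRAY_SIZE(api); idx++)
 *		if (!hw->mac.ops.negotiate_api_version(hw, api[idx]))
 *			break;
 */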

/**
 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 *  @hw: pointer to the HW structure
 *  @api: integer containing requested API version
 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 **/
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
	if (api != ixgbe_mbox_api_10)
		return IXGBE_ERR_INVALID_ARGUMENT;

	return 0;
}

int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
					 sizeof(msg) / sizeof(u32));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}

static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};

static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};

const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};