/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "vf.h"
#include "ixgbevf.h"

/* On Hyper-V, to reset, we need to read from this offset
 * in the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET           0x201

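/**
 * ixgbevf_write_msg_read_ack - send a mailbox message and read the reply
 * @hw: pointer to the HW structure
 * @msg: message buffer to send to the PF
 * @retmsg: buffer that receives the PF's reply
 * @size: length of both buffers in 32-bit words
 *
 * Posts @msg to the PF mailbox and, if the write succeeds, reads the
 * posted reply into @retmsg.
 */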
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
					     u32 *retmsg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 retval = mbx->ops.write_posted(hw, msg, size);

	if (retval)
		return retval;

	return mbx->ops.read_posted(hw, retmsg, size);
}

/**
 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by clearing the adapter_stopped flag.  The VF has
 *  nothing else to do here; global settings such as counters, filter
 *  tables, link and flow control are managed by the PF.
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}

/**
 *  ixgbevf_init_hw_vf - virtual function hardware initialization
 *  @hw: pointer to hardware structure
 *
 *  Initialize the hardware by starting the hardware and then reading
 *  the device MAC address
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
	s32 status = hw->mac.ops.start_hw(hw);

	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

	return status;
}

/**
 *  ixgbevf_reset_hw_vf - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by the PF; also set up
	 * the mc_filter_type, which is piggybacked on the MAC address in
	 * word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}

/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}

/**
 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}

/**
 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 *  @hw: pointer to hardware structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits from a multicast address that determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits from
 *  incoming Rx multicast addresses to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL register. The MO field is set during
 *  initialization to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 *  @hw: pointer to the HW structure
 *  @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}

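/**
 *  ixgbevf_set_uc_addr_vf - Set or clear a unicast MAC filter on the PF
 *  @hw: pointer to the HW structure
 *  @index: index of the macvlan filter; zero clears the VF's macvlan list
 *  @addr: MAC address to program, or NULL when only clearing
 *
 *  Sends an IXGBE_VF_SET_MACVLAN request to the PF and checks the reply;
 *  returns -ENOMEM if the PF NACKs the request.
 */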
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	u32 msgbuf[3], msgbuf_chk;
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	msgbuf_chk = msgbuf[0];

	if (addr)
		ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
			return -ENOMEM;
	}

	return ret_val;
}

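/**
 * ixgbevf_hv_set_uc_addr_vf - stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 *
 * Hyper-V variant - just a stub.
 */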
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success.
 *          -EOPNOTSUPP if the API doesn't support this operation.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore the PF will
	 * compress 16 RETA entries into each DWORD, giving 2 bits per entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support RSS querying for 82599 and x540 devices only.
	 * Thus return an error if the API doesn't support RETA querying or if
	 * querying is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}

/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success.
 *          -EOPNOTSUPP if the API doesn't support this operation.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if the API doesn't support RSS Random Key
	 * retrieval or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}

/**
 *  ixgbevf_set_rar_vf - set device MAC address
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if NACKed the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
		return IXGBE_ERR_MBX;
	}

	return ret_val;
}

471 
472 /**
473  *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
474  *  @hw: pointer to hardware structure
475  *  @index: Receive address register to write
476  *  @addr: Address to put into receive address register
477  *  @vmdq: Unused in this implementation
478  *
479  * We don't really allow setting the device MAC address. However,
480  * if the address being set is the permanent MAC address we will
481  * permit that.
482  **/
483 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
484 				 u32 vmdq)
485 {
486 	if (ether_addr_equal(addr, hw->mac.perm_addr))
487 		return 0;
488 
489 	return -EOPNOTSUPP;
490 }
491 
/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses one 16-bit word.  We have 30
	 * 16-bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multicast
	 * addresses except in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}

/**
 * ixgbevf_hv_update_mc_addr_list_vf - stub
 * @hw: unused
 * @netdev: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_update_xcast_mode - Update Multicast mode
 *  @hw: pointer to the HW structure
 *  @xcast_mode: new multicast mode
 *
 *  Updates the multicast mode of the VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promiscuous mode was introduced in API version 1.3 */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}

/**
 * ixgbevf_hv_update_xcast_mode - stub
 * @hw: unused
 * @xcast_mode: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 *  @hw: pointer to the HW structure
 *  @vlan: 12-bit VLAN ID
 *  @vind: unused by VF drivers
 *  @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		goto mbx_err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
	return err;
}

/**
 * ixgbevf_hv_set_vfta_vf - stub
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @speed: Unused in this implementation
 *  @autoneg: Unused in this implementation
 *  @autoneg_wait_to_complete: Unused in this implementation
 *
 *  Do nothing and return success.  VF drivers are not allowed to change
 *  global settings.  Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}

/**
 *  ixgbevf_check_mac_link_vf - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true if link is up, false otherwise
 *  @autoneg_wait_to_complete: unused
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down there is no point in checking to see
	 * if the PF is up
	 */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision; it is best
	 * to wait until we are called again rather than report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS; if it is a NACK we must have lost CTS
		 * status
		 */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the PF is talking; if we timed out in the past we need to reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}

/**
 * ixgbevf_hv_check_mac_link_vf - check link
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down there is no point in checking to see
	 * if the PF is up
	 */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}

/**
 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 *  @hw: pointer to the HW structure
 *  @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (ret_val)
		return ret_val;
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_MBX;

	return 0;
}

/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* the CRC is 4 bytes */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}

/**
 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 *  @hw: pointer to the HW structure
 *  @api: integer containing requested API version
 **/
static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	int err;
	u32 msg[3];

	/* Negotiate the mailbox API version */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;
	msg[2] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* Store value and return 0 on success */
		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
			hw->api_version = api;
			return 0;
		}

		err = IXGBE_ERR_INVALID_ARGUMENT;
	}

	return err;
}

/**
 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 *  @hw: pointer to the HW structure
 *  @api: integer containing requested API version
 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 **/
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
	if (api != ixgbe_mbox_api_10)
		return IXGBE_ERR_INVALID_ARGUMENT;

	return 0;
}

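/**
 * ixgbevf_get_queues - Fetch the VF queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes reported by the PF
 * @default_tc: filled with the default traffic class reported by the PF
 *
 * Also updates hw->mac.max_tx_queues and hw->mac.max_rx_queues from the
 * PF's reply, clamping out-of-range values to sane defaults.
 */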
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}

static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};

static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};

const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};