1 /*******************************************************************************
2 
3   Intel 10 Gigabit PCI Express Linux driver
4   Copyright(c) 1999 - 2014 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 #include <linux/pci.h>
30 #include <linux/delay.h>
31 #include <linux/sched.h>
32 
33 #include "ixgbe.h"
34 #include "ixgbe_phy.h"
35 #include "ixgbe_mbx.h"
36 
37 #define IXGBE_82599_MAX_TX_QUEUES 128
38 #define IXGBE_82599_MAX_RX_QUEUES 128
39 #define IXGBE_82599_RAR_ENTRIES   128
40 #define IXGBE_82599_MC_TBL_SIZE   128
41 #define IXGBE_82599_VFT_TBL_SIZE  128
42 #define IXGBE_82599_RX_PB_SIZE	  512
43 
44 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
45 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
46 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
47 static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
48 						 ixgbe_link_speed speed,
49 						 bool autoneg_wait_to_complete);
50 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
51 					   ixgbe_link_speed speed,
52 					   bool autoneg_wait_to_complete);
53 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
54 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
55 				      bool autoneg_wait_to_complete);
56 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
57 			       ixgbe_link_speed speed,
58 			       bool autoneg_wait_to_complete);
59 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
60 					 ixgbe_link_speed speed,
61 					 bool autoneg_wait_to_complete);
62 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
63 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
64 				     u8 dev_addr, u8 *data);
65 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
66 				      u8 dev_addr, u8 data);
67 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
68 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
69 
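/**
 *  ixgbe_mng_enabled - Is the manageability engine enabled?
 *  @hw: pointer to hardware structure
 *
 *  Returns true only if the manageability firmware is in pass-through
 *  mode, receive TCO is enabled and manageability clock gating is not
 *  active.
 **/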
70 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
71 {
72 	u32 fwsm, manc, factps;
73 
74 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
75 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
76 		return false;
77 
78 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
79 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
80 		return false;
81 
82 	factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
83 	if (factps & IXGBE_FACTPS_MNGCG)
84 		return false;
85 
86 	return true;
87 }
88 
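/**
 *  ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Selects the laser control and setup_link routines based on the media
 *  type, multispeed fiber support and whether SmartSpeed or LESM applies.
 **/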
89 static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
90 {
91 	struct ixgbe_mac_info *mac = &hw->mac;
92 
	/* enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled
	 */
96 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
97 	    !ixgbe_mng_enabled(hw)) {
98 		mac->ops.disable_tx_laser =
99 				       &ixgbe_disable_tx_laser_multispeed_fiber;
100 		mac->ops.enable_tx_laser =
101 					&ixgbe_enable_tx_laser_multispeed_fiber;
102 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
103 	} else {
104 		mac->ops.disable_tx_laser = NULL;
105 		mac->ops.enable_tx_laser = NULL;
106 		mac->ops.flap_tx_laser = NULL;
107 	}
108 
109 	if (hw->phy.multispeed_fiber) {
110 		/* Set up dual speed SFP+ support */
111 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
112 	} else {
113 		if ((mac->ops.get_media_type(hw) ==
114 		     ixgbe_media_type_backplane) &&
115 		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
116 		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
117 		     !ixgbe_verify_lesm_fw_enabled_82599(hw))
118 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
119 		else
120 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
121 	}
122 }
123 
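/**
 *  ixgbe_setup_sfp_modules_82599 - Set up SFP+ modules
 *  @hw: pointer to hardware structure
 *
 *  Runs the SFP+ init sequence from the EEPROM under the SW/FW semaphore
 *  and then restarts the DSP in SFI mode.  Does nothing while the SFP
 *  type is still unknown.
 **/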
124 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
125 {
126 	s32 ret_val;
127 	u16 list_offset, data_offset, data_value;
128 
129 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
130 		ixgbe_init_mac_link_ops_82599(hw);
131 
132 		hw->phy.ops.reset = NULL;
133 
134 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
135 							      &data_offset);
136 		if (ret_val)
137 			return ret_val;
138 
139 		/* PHY config will finish before releasing the semaphore */
140 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
141 							IXGBE_GSSR_MAC_CSR_SM);
142 		if (ret_val)
143 			return IXGBE_ERR_SWFW_SYNC;
144 
145 		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
146 			goto setup_sfp_err;
147 		while (data_value != 0xffff) {
148 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
149 			IXGBE_WRITE_FLUSH(hw);
150 			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
151 				goto setup_sfp_err;
152 		}
153 
154 		/* Release the semaphore */
155 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
156 		/*
157 		 * Delay obtaining semaphore again to allow FW access,
158 		 * semaphore_delay is in ms usleep_range needs us.
159 		 */
160 		usleep_range(hw->eeprom.semaphore_delay * 1000,
161 			     hw->eeprom.semaphore_delay * 2000);
162 
163 		/* Restart DSP and set SFI mode */
164 		ret_val = hw->mac.ops.prot_autoc_write(hw,
165 			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
166 			false);
167 
168 		if (ret_val) {
169 			hw_dbg(hw, " sfp module setup not complete\n");
170 			return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
171 		}
172 	}
173 
174 	return 0;
175 
176 setup_sfp_err:
177 	/* Release the semaphore */
178 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining the semaphore again to allow FW access;
	 * semaphore_delay is in ms, usleep_range() needs us.
	 */
182 	usleep_range(hw->eeprom.semaphore_delay * 1000,
183 		     hw->eeprom.semaphore_delay * 2000);
184 	hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
185 	return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
186 }
187 
188 /**
189  *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
190  *  @hw: pointer to hardware structure
 *  @locked: Return whether we took the lock for this read.
192  *  @reg_val: Value we read from AUTOC
193  *
194  *  For this part (82599) we need to wrap read-modify-writes with a possible
195  *  FW/SW lock.  It is assumed this lock will be freed with the next
 *  prot_autoc_write_82599().  Note that locked can only be true in cases
197  *  where this function doesn't return an error.
198  **/
199 static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
200 				 u32 *reg_val)
201 {
202 	s32 ret_val;
203 
204 	*locked = false;
205 	/* If LESM is on then we need to hold the SW/FW semaphore. */
206 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
207 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
208 					IXGBE_GSSR_MAC_CSR_SM);
209 		if (ret_val)
210 			return IXGBE_ERR_SWFW_SYNC;
211 
212 		*locked = true;
213 	}
214 
215 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
216 	return 0;
217 }
218 
219 /**
220  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
221  * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	     a previous prot_autoc_read_82599().
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
227  * AUTOC. Likewise after a write we need to do a pipeline reset.
228  **/
229 static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
230 {
231 	s32 ret_val = 0;
232 
233 	/* Blocked by MNG FW so bail */
234 	if (ixgbe_check_reset_blocked(hw))
235 		goto out;
236 
237 	/* We only need to get the lock if:
238 	 *  - We didn't do it already (in the read part of a read-modify-write)
239 	 *  - LESM is enabled.
240 	 */
241 	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
242 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
243 					IXGBE_GSSR_MAC_CSR_SM);
244 		if (ret_val)
245 			return IXGBE_ERR_SWFW_SYNC;
246 
247 		locked = true;
248 	}
249 
250 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
251 	ret_val = ixgbe_reset_pipeline_82599(hw);
252 
253 out:
254 	/* Free the SW/FW semaphore as we either grabbed it here or
255 	 * already had it when this function was called.
256 	 */
257 	if (locked)
258 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
259 
260 	return ret_val;
261 }
262 
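/**
 *  ixgbe_get_invariants_82599 - Store 82599 device invariants
 *  @hw: pointer to hardware structure
 *
 *  Initializes the MAC link ops and fills in the table sizes, queue counts
 *  and MSI-X vector count for the 82599 MAC.
 **/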
263 static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
264 {
265 	struct ixgbe_mac_info *mac = &hw->mac;
266 
267 	ixgbe_init_mac_link_ops_82599(hw);
268 
269 	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
270 	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
271 	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
272 	mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
273 	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
274 	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
275 	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
276 
277 	return 0;
278 }
279 
280 /**
281  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
282  *  @hw: pointer to hardware structure
283  *
284  *  Initialize any function pointers that were not able to be
285  *  set during get_invariants because the PHY/SFP type was
286  *  not known.  Perform the SFP init if necessary.
287  *
288  **/
289 static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
290 {
291 	struct ixgbe_mac_info *mac = &hw->mac;
292 	struct ixgbe_phy_info *phy = &hw->phy;
293 	s32 ret_val;
294 	u32 esdp;
295 
296 	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Flag that the I2C bus access control unit is shared. */
298 		hw->phy.qsfp_shared_i2c_bus = true;
299 
300 		/* Initialize access to QSFP+ I2C bus */
301 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
302 		esdp |= IXGBE_ESDP_SDP0_DIR;
303 		esdp &= ~IXGBE_ESDP_SDP1_DIR;
304 		esdp &= ~IXGBE_ESDP_SDP0;
305 		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
306 		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
307 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
308 		IXGBE_WRITE_FLUSH(hw);
309 
310 		phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
311 		phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
312 	}
313 
314 	/* Identify the PHY or SFP module */
315 	ret_val = phy->ops.identify(hw);
316 
317 	/* Setup function pointers based on detected SFP module and speeds */
318 	ixgbe_init_mac_link_ops_82599(hw);
319 
320 	/* If copper media, overwrite with copper function pointers */
321 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
322 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
323 		mac->ops.get_link_capabilities =
324 			&ixgbe_get_copper_link_capabilities_generic;
325 	}
326 
327 	/* Set necessary function pointers based on phy type */
328 	switch (hw->phy.type) {
329 	case ixgbe_phy_tn:
330 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
331 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
332 		phy->ops.get_firmware_version =
333 			     &ixgbe_get_phy_firmware_version_tnx;
334 		break;
335 	default:
336 		break;
337 	}
338 
339 	return ret_val;
340 }
341 
342 /**
343  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
344  *  @hw: pointer to hardware structure
345  *  @speed: pointer to link speed
346  *  @autoneg: true when autoneg or autotry is enabled
347  *
348  *  Determines the link capabilities by reading the AUTOC register.
349  **/
350 static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
351 					     ixgbe_link_speed *speed,
352 					     bool *autoneg)
353 {
354 	u32 autoc = 0;
355 
356 	/* Determine 1G link capabilities off of SFP+ type */
357 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
358 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
359 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
360 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
361 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
362 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
363 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
364 		*autoneg = true;
365 		return 0;
366 	}
367 
368 	/*
369 	 * Determine link capabilities based on the stored value of AUTOC,
370 	 * which represents EEPROM defaults.  If AUTOC value has not been
371 	 * stored, use the current register value.
372 	 */
373 	if (hw->mac.orig_link_settings_stored)
374 		autoc = hw->mac.orig_autoc;
375 	else
376 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
377 
378 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
379 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
380 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
381 		*autoneg = false;
382 		break;
383 
384 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
385 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
386 		*autoneg = false;
387 		break;
388 
389 	case IXGBE_AUTOC_LMS_1G_AN:
390 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
391 		*autoneg = true;
392 		break;
393 
394 	case IXGBE_AUTOC_LMS_10G_SERIAL:
395 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
396 		*autoneg = false;
397 		break;
398 
399 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
400 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
401 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
402 		if (autoc & IXGBE_AUTOC_KR_SUPP)
403 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
404 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
405 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
406 		if (autoc & IXGBE_AUTOC_KX_SUPP)
407 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
408 		*autoneg = true;
409 		break;
410 
411 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
412 		*speed = IXGBE_LINK_SPEED_100_FULL;
413 		if (autoc & IXGBE_AUTOC_KR_SUPP)
414 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
415 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
416 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
417 		if (autoc & IXGBE_AUTOC_KX_SUPP)
418 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
419 		*autoneg = true;
420 		break;
421 
422 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
423 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
424 		*autoneg = false;
425 		break;
426 
427 	default:
428 		return IXGBE_ERR_LINK_SETUP;
429 	}
430 
431 	if (hw->phy.multispeed_fiber) {
432 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
433 			  IXGBE_LINK_SPEED_1GB_FULL;
434 
435 		/* QSFP must not enable auto-negotiation */
436 		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
437 			*autoneg = false;
438 		else
439 			*autoneg = true;
440 	}
441 
442 	return 0;
443 }
444 
445 /**
446  *  ixgbe_get_media_type_82599 - Get media type
447  *  @hw: pointer to hardware structure
448  *
449  *  Returns the media type (fiber, copper, backplane)
450  **/
451 static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
452 {
453 	/* Detect if there is a copper PHY attached. */
454 	switch (hw->phy.type) {
455 	case ixgbe_phy_cu_unknown:
456 	case ixgbe_phy_tn:
457 		return ixgbe_media_type_copper;
458 
459 	default:
460 		break;
461 	}
462 
463 	switch (hw->device_id) {
464 	case IXGBE_DEV_ID_82599_KX4:
465 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
466 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
467 	case IXGBE_DEV_ID_82599_KR:
468 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
469 	case IXGBE_DEV_ID_82599_XAUI_LOM:
470 		/* Default device ID is mezzanine card KX/KX4 */
471 		return ixgbe_media_type_backplane;
472 
473 	case IXGBE_DEV_ID_82599_SFP:
474 	case IXGBE_DEV_ID_82599_SFP_FCOE:
475 	case IXGBE_DEV_ID_82599_SFP_EM:
476 	case IXGBE_DEV_ID_82599_SFP_SF2:
477 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
478 	case IXGBE_DEV_ID_82599EN_SFP:
479 		return ixgbe_media_type_fiber;
480 
481 	case IXGBE_DEV_ID_82599_CX4:
482 		return ixgbe_media_type_cx4;
483 
484 	case IXGBE_DEV_ID_82599_T3_LOM:
485 		return ixgbe_media_type_copper;
486 
487 	case IXGBE_DEV_ID_82599_LS:
488 		return ixgbe_media_type_fiber_lco;
489 
490 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
491 		return ixgbe_media_type_fiber_qsfp;
492 
493 	default:
494 		return ixgbe_media_type_unknown;
495 	}
496 }
497 
498 /**
499  * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
500  * @hw: pointer to hardware structure
501  *
502  * Disables link, should be called during D3 power down sequence.
503  *
504  **/
505 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
506 {
507 	u32 autoc2_reg, fwsm;
508 	u16 ee_ctrl_2 = 0;
509 
510 	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
511 
512 	/* Check to see if MNG FW could be enabled */
513 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
514 
515 	if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
516 	    !hw->wol_enabled &&
517 	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
518 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
519 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
520 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
521 	}
522 }
523 
524 /**
525  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
526  *  @hw: pointer to hardware structure
527  *  @autoneg_wait_to_complete: true when waiting for completion is needed
528  *
529  *  Configures link settings based on values in the ixgbe_hw struct.
530  *  Restarts the link.  Performs autonegotiation if needed.
531  **/
532 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
533 			       bool autoneg_wait_to_complete)
534 {
535 	u32 autoc_reg;
536 	u32 links_reg;
537 	u32 i;
538 	s32 status = 0;
539 	bool got_lock = false;
540 
541 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
542 		status = hw->mac.ops.acquire_swfw_sync(hw,
543 						IXGBE_GSSR_MAC_CSR_SM);
544 		if (status)
545 			return status;
546 
547 		got_lock = true;
548 	}
549 
550 	/* Restart link */
551 	ixgbe_reset_pipeline_82599(hw);
552 
553 	if (got_lock)
554 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
555 
556 	/* Only poll for autoneg to complete if specified to do so */
557 	if (autoneg_wait_to_complete) {
558 		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
559 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
560 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
561 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
562 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
563 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
564 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
565 			links_reg = 0; /* Just in case Autoneg time = 0 */
566 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
567 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
568 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
569 					break;
570 				msleep(100);
571 			}
572 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
573 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
574 				hw_dbg(hw, "Autoneg did not complete.\n");
575 			}
576 		}
577 	}
578 
	/* Add delay to filter out noise during initial link setup */
580 	msleep(50);
581 
582 	return status;
583 }
584 
585 /**
586  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
587  *  @hw: pointer to hardware structure
588  *
589  *  The base drivers may require better control over SFP+ module
590  *  PHY states.  This includes selectively shutting down the Tx
591  *  laser on the PHY, effectively halting physical link.
592  **/
593 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
594 {
595 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
596 
597 	/* Blocked by MNG FW so bail */
598 	if (ixgbe_check_reset_blocked(hw))
599 		return;
600 
601 	/* Disable tx laser; allow 100us to go dark per spec */
602 	esdp_reg |= IXGBE_ESDP_SDP3;
603 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
604 	IXGBE_WRITE_FLUSH(hw);
605 	udelay(100);
606 }
607 
608 /**
609  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
610  *  @hw: pointer to hardware structure
611  *
612  *  The base drivers may require better control over SFP+ module
613  *  PHY states.  This includes selectively turning on the Tx
614  *  laser on the PHY, effectively starting physical link.
615  **/
616 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
617 {
618 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
619 
620 	/* Enable tx laser; allow 100ms to light up */
621 	esdp_reg &= ~IXGBE_ESDP_SDP3;
622 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
623 	IXGBE_WRITE_FLUSH(hw);
624 	msleep(100);
625 }
626 
627 /**
628  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
629  *  @hw: pointer to hardware structure
630  *
631  *  When the driver changes the link speeds that it can support,
632  *  it sets autotry_restart to true to indicate that we need to
633  *  initiate a new autotry session with the link partner.  To do
634  *  so, we set the speed then disable and re-enable the tx laser, to
635  *  alert the link partner that it also needs to restart autotry on its
636  *  end.  This is consistent with true clause 37 autoneg, which also
637  *  involves a loss of signal.
638  **/
639 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
640 {
641 	/* Blocked by MNG FW so bail */
642 	if (ixgbe_check_reset_blocked(hw))
643 		return;
644 
645 	if (hw->mac.autotry_restart) {
646 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
647 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
648 		hw->mac.autotry_restart = false;
649 	}
650 }
651 
652 /**
653  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
654  *  @hw: pointer to hardware structure
655  *  @speed: new link speed
656  *  @autoneg_wait_to_complete: true when waiting for completion is needed
657  *
658  *  Set the link speed in the AUTOC register and restarts link.
659  **/
660 static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
661 					  ixgbe_link_speed speed,
662 					  bool autoneg_wait_to_complete)
663 {
664 	s32 status = 0;
665 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
666 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
667 	u32 speedcnt = 0;
668 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
669 	u32 i = 0;
670 	bool link_up = false;
671 	bool autoneg = false;
672 
673 	/* Mask off requested but non-supported speeds */
674 	status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
675 						   &autoneg);
676 	if (status != 0)
677 		return status;
678 
679 	speed &= link_speed;
680 
681 	/*
682 	 * Try each speed one by one, highest priority first.  We do this in
683 	 * software because 10gb fiber doesn't support speed autonegotiation.
684 	 */
685 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
686 		speedcnt++;
687 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
688 
689 		/* If we already have link at this speed, just jump out */
690 		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
691 						false);
692 		if (status != 0)
693 			return status;
694 
695 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
696 			goto out;
697 
698 		/* Set the module link speed */
699 		switch (hw->phy.media_type) {
700 		case ixgbe_media_type_fiber:
701 			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
702 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
703 			IXGBE_WRITE_FLUSH(hw);
704 			break;
705 		case ixgbe_media_type_fiber_qsfp:
706 			/* QSFP module automatically detects MAC link speed */
707 			break;
708 		default:
709 			hw_dbg(hw, "Unexpected media type.\n");
710 			break;
711 		}
712 
713 		/* Allow module to change analog characteristics (1G->10G) */
714 		msleep(40);
715 
716 		status = ixgbe_setup_mac_link_82599(hw,
717 						    IXGBE_LINK_SPEED_10GB_FULL,
718 						    autoneg_wait_to_complete);
719 		if (status != 0)
720 			return status;
721 
722 		/* Flap the tx laser if it has not already been done */
723 		if (hw->mac.ops.flap_tx_laser)
724 			hw->mac.ops.flap_tx_laser(hw);
725 
726 		/*
727 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
728 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
729 		 * attempted.  82599 uses the same timing for 10g SFI.
730 		 */
731 		for (i = 0; i < 5; i++) {
732 			/* Wait for the link partner to also set speed */
733 			msleep(100);
734 
735 			/* If we have link, just jump out */
736 			status = hw->mac.ops.check_link(hw, &link_speed,
737 							&link_up, false);
738 			if (status != 0)
739 				return status;
740 
741 			if (link_up)
742 				goto out;
743 		}
744 	}
745 
746 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
747 		speedcnt++;
748 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
749 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
750 
751 		/* If we already have link at this speed, just jump out */
752 		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
753 						false);
754 		if (status != 0)
755 			return status;
756 
757 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
758 			goto out;
759 
760 		/* Set the module link speed */
761 		switch (hw->phy.media_type) {
762 		case ixgbe_media_type_fiber:
763 			esdp_reg &= ~IXGBE_ESDP_SDP5;
764 			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
765 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
766 			IXGBE_WRITE_FLUSH(hw);
767 			break;
768 		case ixgbe_media_type_fiber_qsfp:
769 			/* QSFP module automatically detects MAC link speed */
770 			break;
771 		default:
772 			hw_dbg(hw, "Unexpected media type.\n");
773 			break;
774 		}
775 
776 		/* Allow module to change analog characteristics (10G->1G) */
777 		msleep(40);
778 
779 		status = ixgbe_setup_mac_link_82599(hw,
780 						    IXGBE_LINK_SPEED_1GB_FULL,
781 						    autoneg_wait_to_complete);
782 		if (status != 0)
783 			return status;
784 
785 		/* Flap the tx laser if it has not already been done */
786 		if (hw->mac.ops.flap_tx_laser)
787 			hw->mac.ops.flap_tx_laser(hw);
788 
789 		/* Wait for the link partner to also set speed */
790 		msleep(100);
791 
792 		/* If we have link, just jump out */
793 		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
794 						false);
795 		if (status != 0)
796 			return status;
797 
798 		if (link_up)
799 			goto out;
800 	}
801 
802 	/*
	 * We didn't get link.  Configure back to the highest speed we tried
804 	 * (if there was more than one).  We call ourselves back with just the
805 	 * single highest speed that the user requested.
806 	 */
807 	if (speedcnt > 1)
808 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
809 							       highest_link_speed,
810 							       autoneg_wait_to_complete);
811 
812 out:
813 	/* Set autoneg_advertised value based on input link speed */
814 	hw->phy.autoneg_advertised = 0;
815 
816 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
817 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
818 
819 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
820 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
821 
822 	return status;
823 }
824 
825 /**
826  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
827  *  @hw: pointer to hardware structure
828  *  @speed: new link speed
829  *  @autoneg_wait_to_complete: true when waiting for completion is needed
830  *
831  *  Implements the Intel SmartSpeed algorithm.
832  **/
833 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
834 				     ixgbe_link_speed speed,
835 				     bool autoneg_wait_to_complete)
836 {
837 	s32 status = 0;
838 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
839 	s32 i, j;
840 	bool link_up = false;
841 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
842 
843 	 /* Set autoneg_advertised value based on input link speed */
844 	hw->phy.autoneg_advertised = 0;
845 
846 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
847 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
848 
849 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
850 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
851 
852 	if (speed & IXGBE_LINK_SPEED_100_FULL)
853 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
854 
855 	/*
856 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
857 	 * autoneg advertisement if link is unable to be established at the
858 	 * highest negotiated rate.  This can sometimes happen due to integrity
859 	 * issues with the physical media connection.
860 	 */
861 
862 	/* First, try to get link with full advertisement */
863 	hw->phy.smart_speed_active = false;
864 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
865 		status = ixgbe_setup_mac_link_82599(hw, speed,
866 						    autoneg_wait_to_complete);
867 		if (status != 0)
868 			goto out;
869 
870 		/*
871 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
872 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
873 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
874 		 * Table 9 in the AN MAS.
875 		 */
876 		for (i = 0; i < 5; i++) {
877 			mdelay(100);
878 
879 			/* If we have link, just jump out */
880 			status = hw->mac.ops.check_link(hw, &link_speed,
881 							&link_up, false);
882 			if (status != 0)
883 				goto out;
884 
885 			if (link_up)
886 				goto out;
887 		}
888 	}
889 
890 	/*
891 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
892 	 * (or BX4/BX), then disable KR and try again.
893 	 */
894 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
895 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
896 		goto out;
897 
898 	/* Turn SmartSpeed on to disable KR support */
899 	hw->phy.smart_speed_active = true;
900 	status = ixgbe_setup_mac_link_82599(hw, speed,
901 					    autoneg_wait_to_complete);
902 	if (status != 0)
903 		goto out;
904 
905 	/*
906 	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well as for multiple cycles of
908 	 * parallel detect, both 10g and 1g. This allows for the maximum
909 	 * connect attempts as defined in the AN MAS table 73-7.
910 	 */
911 	for (i = 0; i < 6; i++) {
912 		mdelay(100);
913 
914 		/* If we have link, just jump out */
915 		status = hw->mac.ops.check_link(hw, &link_speed,
916 						&link_up, false);
917 		if (status != 0)
918 			goto out;
919 
920 		if (link_up)
921 			goto out;
922 	}
923 
924 	/* We didn't get link.  Turn SmartSpeed back off. */
925 	hw->phy.smart_speed_active = false;
926 	status = ixgbe_setup_mac_link_82599(hw, speed,
927 					    autoneg_wait_to_complete);
928 
929 out:
930 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
931 		hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
932 	return status;
933 }
934 
935 /**
936  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
937  *  @hw: pointer to hardware structure
938  *  @speed: new link speed
939  *  @autoneg_wait_to_complete: true when waiting for completion is needed
940  *
941  *  Set the link speed in the AUTOC register and restarts link.
942  **/
943 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
944 				      ixgbe_link_speed speed,
945 				      bool autoneg_wait_to_complete)
946 {
947 	bool autoneg = false;
948 	s32 status;
949 	u32 pma_pmd_1g, link_mode, links_reg, i;
950 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
951 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
952 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
953 
	/* holds the current value of the AUTOC register */
955 	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
956 	/* holds the cached value of AUTOC register */
957 	u32 orig_autoc = 0;
958 	/* temporary variable used for comparison purposes */
959 	u32 autoc = current_autoc;
960 
961 	/* Check to see if speed passed in is supported. */
962 	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
963 						   &autoneg);
964 	if (status)
965 		return status;
966 
967 	speed &= link_capabilities;
968 
969 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
970 		return IXGBE_ERR_LINK_SETUP;
971 
	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
973 	if (hw->mac.orig_link_settings_stored)
974 		orig_autoc = hw->mac.orig_autoc;
975 	else
976 		orig_autoc = autoc;
977 
978 	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
979 	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
980 
981 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
982 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
983 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
984 		/* Set KX4/KX/KR support according to speed requested */
985 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
986 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
987 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
988 				autoc |= IXGBE_AUTOC_KX4_SUPP;
989 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
990 			    (hw->phy.smart_speed_active == false))
991 				autoc |= IXGBE_AUTOC_KR_SUPP;
992 		}
993 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
994 			autoc |= IXGBE_AUTOC_KX_SUPP;
995 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
996 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
997 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
998 		/* Switch from 1G SFI to 10G SFI if requested */
999 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
1000 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
1001 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
1002 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
1003 		}
1004 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
1005 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
1006 		/* Switch from 10G SFI to 1G SFI if requested */
1007 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1008 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
1009 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
1010 			if (autoneg)
1011 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
1012 			else
1013 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
1014 		}
1015 	}
1016 
1017 	if (autoc != current_autoc) {
1018 		/* Restart link */
1019 		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
1020 		if (status)
1021 			return status;
1022 
1023 		/* Only poll for autoneg to complete if specified to do so */
1024 		if (autoneg_wait_to_complete) {
1025 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1026 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1027 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1028 				links_reg = 0; /*Just in case Autoneg time=0*/
1029 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
1030 					links_reg =
1031 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
1032 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
1033 						break;
1034 					msleep(100);
1035 				}
1036 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1037 					status =
1038 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1039 					hw_dbg(hw, "Autoneg did not complete.\n");
1040 				}
1041 			}
1042 		}
1043 
		/* Add delay to filter out noise during initial link setup */
1045 		msleep(50);
1046 	}
1047 
1048 	return status;
1049 }
1050 
1051 /**
1052  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1053  *  @hw: pointer to hardware structure
1054  *  @speed: new link speed
1055  *  @autoneg_wait_to_complete: true if waiting is needed to complete
1056  *
1057  *  Restarts link on PHY and MAC based on settings passed in.
1058  **/
1059 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1060 					 ixgbe_link_speed speed,
1061 					 bool autoneg_wait_to_complete)
1062 {
1063 	s32 status;
1064 
1065 	/* Setup the PHY according to input speed */
1066 	status = hw->phy.ops.setup_link_speed(hw, speed,
1067 					      autoneg_wait_to_complete);
1068 	/* Set up MAC */
1069 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1070 
1071 	return status;
1072 }
1073 
1074 /**
1075  *  ixgbe_reset_hw_82599 - Perform hardware reset
1076  *  @hw: pointer to hardware structure
1077  *
1078  *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, performs a PHY reset, and performs a link (MAC)
1080  *  reset.
1081  **/
1082 static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1083 {
1084 	ixgbe_link_speed link_speed;
1085 	s32 status;
1086 	u32 ctrl, i, autoc, autoc2;
1087 	u32 curr_lms;
1088 	bool link_up = false;
1089 
1090 	/* Call adapter stop to disable tx/rx and clear interrupts */
1091 	status = hw->mac.ops.stop_adapter(hw);
1092 	if (status)
1093 		return status;
1094 
1095 	/* flush pending Tx transactions */
1096 	ixgbe_clear_tx_pending(hw);
1097 
1098 	/* PHY ops must be identified and initialized prior to reset */
1099 
1100 	/* Identify PHY and related function pointers */
1101 	status = hw->phy.ops.init(hw);
1102 
1103 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1104 		return status;
1105 
1106 	/* Setup SFP module if there is one present. */
1107 	if (hw->phy.sfp_setup_needed) {
1108 		status = hw->mac.ops.setup_sfp(hw);
1109 		hw->phy.sfp_setup_needed = false;
1110 	}
1111 
1112 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1113 		return status;
1114 
1115 	/* Reset PHY */
1116 	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
1117 		hw->phy.ops.reset(hw);
1118 
1119 	/* remember AUTOC from before we reset */
1120 	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
1121 
1122 mac_reset_top:
1123 	/*
1124 	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1125 	 * If link reset is used when link is up, it might reset the PHY when
1126 	 * mng is using it.  If link is down or the flag to force full link
1127 	 * reset is set, then perform link reset.
1128 	 */
1129 	ctrl = IXGBE_CTRL_LNK_RST;
1130 	if (!hw->force_full_reset) {
1131 		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1132 		if (link_up)
1133 			ctrl = IXGBE_CTRL_RST;
1134 	}
1135 
1136 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1137 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1138 	IXGBE_WRITE_FLUSH(hw);
1139 
1140 	/* Poll for reset bit to self-clear indicating reset is complete */
1141 	for (i = 0; i < 10; i++) {
1142 		udelay(1);
1143 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1144 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
1145 			break;
1146 	}
1147 
1148 	if (ctrl & IXGBE_CTRL_RST_MASK) {
1149 		status = IXGBE_ERR_RESET_FAILED;
1150 		hw_dbg(hw, "Reset polling failed to complete.\n");
1151 	}
1152 
1153 	msleep(50);
1154 
1155 	/*
1156 	 * Double resets are required for recovery from certain error
1157 	 * conditions.  Between resets, it is necessary to stall to allow time
1158 	 * for any pending HW events to complete.
1159 	 */
1160 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1161 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1162 		goto mac_reset_top;
1163 	}
1164 
1165 	/*
1166 	 * Store the original AUTOC/AUTOC2 values if they have not been
1167 	 * stored off yet.  Otherwise restore the stored original
1168 	 * values since the reset operation sets back to defaults.
1169 	 */
1170 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1171 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1172 
1173 	/* Enable link if disabled in NVM */
1174 	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1175 		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1176 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1177 		IXGBE_WRITE_FLUSH(hw);
1178 	}
1179 
1180 	if (hw->mac.orig_link_settings_stored == false) {
1181 		hw->mac.orig_autoc = autoc;
1182 		hw->mac.orig_autoc2 = autoc2;
1183 		hw->mac.orig_link_settings_stored = true;
1184 	} else {
1185 
		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support, we need to
		 * leave LMS in the state it was in before the MAC reset.
		 * Likewise, if we support WoL we don't want to change
		 * the LMS state either.
		 */
1192 		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
1193 		    hw->wol_enabled)
1194 			hw->mac.orig_autoc =
1195 				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1196 				curr_lms;
1197 
1198 		if (autoc != hw->mac.orig_autoc) {
1199 			status = hw->mac.ops.prot_autoc_write(hw,
1200 							hw->mac.orig_autoc,
1201 							false);
1202 			if (status)
1203 				return status;
1204 		}
1205 
1206 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1207 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1208 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1209 			autoc2 |= (hw->mac.orig_autoc2 &
1210 				   IXGBE_AUTOC2_UPPER_MASK);
1211 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1212 		}
1213 	}
1214 
1215 	/* Store the permanent mac address */
1216 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1217 
1218 	/*
1219 	 * Store MAC address from RAR0, clear receive address registers, and
1220 	 * clear the multicast table.  Also reset num_rar_entries to 128,
1221 	 * since we modify this value when programming the SAN MAC address.
1222 	 */
1223 	hw->mac.num_rar_entries = 128;
1224 	hw->mac.ops.init_rx_addrs(hw);
1225 
1226 	/* Store the permanent SAN mac address */
1227 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1228 
1229 	/* Add the SAN MAC address to the RAR only if it's a valid address */
1230 	if (is_valid_ether_addr(hw->mac.san_addr)) {
1231 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1232 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
1233 
1234 		/* Save the SAN MAC RAR index */
1235 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1236 
1237 		/* Reserve the last RAR for the SAN MAC address */
1238 		hw->mac.num_rar_entries--;
1239 	}
1240 
1241 	/* Store the alternative WWNN/WWPN prefix */
1242 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1243 				       &hw->mac.wwpn_prefix);
1244 
1245 	return status;
1246 }
1247 
1248 /**
1249  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1250  *  @hw: pointer to hardware structure
1251  **/
1252 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1253 {
1254 	int i;
1255 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1256 
1257 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1258 
1259 	/*
1260 	 * Before starting reinitialization process,
1261 	 * FDIRCMD.CMD must be zero.
1262 	 */
1263 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1264 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1265 		      IXGBE_FDIRCMD_CMD_MASK))
1266 			break;
1267 		udelay(10);
1268 	}
1269 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1270 		hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
1271 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1272 	}
1273 
1274 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1275 	IXGBE_WRITE_FLUSH(hw);
1276 	/*
1277 	 * 82599 adapters flow director init flow cannot be restarted,
1278 	 * Workaround 82599 silicon errata by performing the following steps
1279 	 * before re-writing the FDIRCTRL control register with the same value.
1280 	 * - write 1 to bit 8 of FDIRCMD register &
1281 	 * - write 0 to bit 8 of FDIRCMD register
1282 	 */
1283 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1284 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1285 			 IXGBE_FDIRCMD_CLEARHT));
1286 	IXGBE_WRITE_FLUSH(hw);
1287 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1288 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1289 			 ~IXGBE_FDIRCMD_CLEARHT));
1290 	IXGBE_WRITE_FLUSH(hw);
1291 	/*
1292 	 * Clear FDIR Hash register to clear any leftover hashes
1293 	 * waiting to be programmed.
1294 	 */
1295 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1296 	IXGBE_WRITE_FLUSH(hw);
1297 
1298 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1299 	IXGBE_WRITE_FLUSH(hw);
1300 
1301 	/* Poll init-done after we write FDIRCTRL register */
1302 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1303 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1304 				   IXGBE_FDIRCTRL_INIT_DONE)
1305 			break;
1306 		usleep_range(1000, 2000);
1307 	}
1308 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1309 		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
1310 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1311 	}
1312 
1313 	/* Clear FDIR statistics registers (read to clear) */
1314 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1315 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1316 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1317 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1318 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1319 
1320 	return 0;
1321 }
1322 
1323 /**
1324  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1325  *  @hw: pointer to hardware structure
1326  *  @fdirctrl: value to write to flow director control register
1327  **/
1328 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1329 {
1330 	int i;
1331 
1332 	/* Prime the keys for hashing */
1333 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1334 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1335 
1336 	/*
1337 	 * Poll init-done after we write the register.  Estimated times:
1338 	 *      10G: PBALLOC = 11b, timing is 60us
1339 	 *       1G: PBALLOC = 11b, timing is 600us
1340 	 *     100M: PBALLOC = 11b, timing is 6ms
1341 	 *
	 *     Multiply these timings by 4 if under full Rx load
1343 	 *
1344 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1345 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1346 	 * this might not finish in our poll time, but we can live with that
1347 	 * for now.
1348 	 */
1349 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1350 	IXGBE_WRITE_FLUSH(hw);
1351 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1352 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1353 				   IXGBE_FDIRCTRL_INIT_DONE)
1354 			break;
1355 		usleep_range(1000, 2000);
1356 	}
1357 
1358 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1359 		hw_dbg(hw, "Flow Director poll time exceeded!\n");
1360 }
1361 
1362 /**
1363  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1364  *  @hw: pointer to hardware structure
1365  *  @fdirctrl: value to write to flow director control register, initially
1366  *             contains just the value of the Rx packet buffer allocation
1367  **/
1368 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1369 {
1370 	/*
1371 	 * Continue setup of fdirctrl register bits:
1372 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1373 	 *  Set the maximum length per hash bucket to 0xA filters
1374 	 *  Send interrupt when 64 filters are left
1375 	 */
1376 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1377 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1378 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1379 
1380 	/* write hashes and fdirctrl register, poll for completion */
1381 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1382 
1383 	return 0;
1384 }
1385 
1386 /**
1387  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1388  *  @hw: pointer to hardware structure
1389  *  @fdirctrl: value to write to flow director control register, initially
1390  *             contains just the value of the Rx packet buffer allocation
1391  **/
1392 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1393 {
1394 	/*
1395 	 * Continue setup of fdirctrl register bits:
1396 	 *  Turn perfect match filtering on
1397 	 *  Report hash in RSS field of Rx wb descriptor
1398 	 *  Initialize the drop queue
1399 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1400 	 *  Set the maximum length per hash bucket to 0xA filters
1401 	 *  Send interrupt when 64 (0x4 * 16) filters are left
1402 	 */
1403 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1404 		    IXGBE_FDIRCTRL_REPORT_STATUS |
1405 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1406 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1407 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1408 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1409 
1410 	/* write hashes and fdirctrl register, poll for completion */
1411 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1412 
1413 	return 0;
1414 }
1415 
1416 /*
1417  * These defines allow us to quickly generate all of the necessary instructions
1418  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1419  * for values 0 through 15
1420  */
1421 #define IXGBE_ATR_COMMON_HASH_KEY \
1422 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1423 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1424 do { \
1425 	u32 n = (_n); \
1426 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1427 		common_hash ^= lo_hash_dword >> n; \
1428 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1429 		bucket_hash ^= lo_hash_dword >> n; \
1430 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1431 		sig_hash ^= lo_hash_dword << (16 - n); \
1432 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1433 		common_hash ^= hi_hash_dword >> n; \
1434 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1435 		bucket_hash ^= hi_hash_dword >> n; \
1436 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1437 		sig_hash ^= hi_hash_dword << (16 - n); \
1438 } while (0)
1439 
1440 /**
1441  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 *  @input: unique input dword
 *  @common: compressed common input dword
 *
 *  This function is heavily optimized: it unwinds all of the loops, lets
 *  the compiler work out all of the conditional ifs since the keys are
 *  static defines, and computes two keys at once since the hashed dword
 *  stream will be the same for both keys.
1449  **/
1450 static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1451 					    union ixgbe_atr_hash_dword common)
1452 {
1453 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1454 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1455 
	/* record the flow_vm_vlan bits as they are a key part of the hash */
1457 	flow_vm_vlan = ntohl(input.dword);
1458 
1459 	/* generate common hash dword */
1460 	hi_hash_dword = ntohl(common.dword);
1461 
1462 	/* low dword is word swapped version of common */
1463 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1464 
1465 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1466 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1467 
1468 	/* Process bits 0 and 16 */
1469 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1470 
1471 	/*
1472 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1473 	 * delay this because bit 0 of the stream should not be processed
1474 	 * so we do not add the vlan until after bit 0 was processed
1475 	 */
1476 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1477 
	/* Process the remaining 30 bits of the key */
1479 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1480 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1481 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1482 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1483 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1484 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1485 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1486 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1487 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1488 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1489 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1490 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1491 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1492 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1493 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1494 
1495 	/* combine common_hash result with signature and bucket hashes */
1496 	bucket_hash ^= common_hash;
1497 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1498 
1499 	sig_hash ^= common_hash << 16;
1500 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1501 
1502 	/* return completed signature hash */
1503 	return sig_hash ^ bucket_hash;
1504 }
1505 
1506 /**
 *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1508  *  @hw: pointer to hardware structure
1509  *  @input: unique input dword
1510  *  @common: compressed common input dword
1511  *  @queue: queue index to direct traffic to
1512  **/
1513 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1514 					  union ixgbe_atr_hash_dword input,
1515 					  union ixgbe_atr_hash_dword common,
1516 					  u8 queue)
1517 {
1518 	u64  fdirhashcmd;
1519 	u32  fdircmd;
1520 
1521 	/*
1522 	 * Get the flow_type in order to program FDIRCMD properly
1523 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1524 	 */
1525 	switch (input.formatted.flow_type) {
1526 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1527 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1528 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1529 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1530 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1531 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1532 		break;
1533 	default:
1534 		hw_dbg(hw, " Error on flow type input\n");
1535 		return IXGBE_ERR_CONFIG;
1536 	}
1537 
1538 	/* configure FDIRCMD register */
1539 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1540 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1541 	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1542 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1543 
1544 	/*
	 * The lower 32 bits of fdirhashcmd are for FDIRHASH, the upper 32 bits
	 * are for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1547 	 */
1548 	fdirhashcmd = (u64)fdircmd << 32;
1549 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1550 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1551 
1552 	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1553 
1554 	return 0;
1555 }
1556 
1557 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1558 do { \
1559 	u32 n = (_n); \
1560 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1561 		bucket_hash ^= lo_hash_dword >> n; \
1562 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1563 		bucket_hash ^= hi_hash_dword >> n; \
1564 } while (0)
1565 
1566 /**
1567  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 *  @input: input bitstream to compute the hash on
 *  @input_mask: mask for the input bitstream
 *
 *  This function serves two main purposes.  First it applies the input_mask
 *  to the input, resulting in a cleaned up data stream.
1573  *  Secondly it computes the hash and stores it in the bkt_hash field at
1574  *  the end of the input byte stream.  This way it will be available for
1575  *  future use without needing to recompute the hash.
1576  **/
1577 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1578 					  union ixgbe_atr_input *input_mask)
1579 {
1580 
1581 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1582 	u32 bucket_hash = 0, hi_dword = 0;
1583 	int i;
1584 
1585 	/* Apply masks to input data */
1586 	for (i = 0; i <= 10; i++)
1587 		input->dword_stream[i] &= input_mask->dword_stream[i];
1588 
	/* record the flow_vm_vlan bits as they are a key part of the hash */
1590 	flow_vm_vlan = ntohl(input->dword_stream[0]);
1591 
1592 	/* generate common hash dword */
1593 	for (i = 1; i <= 10; i++)
1594 		hi_dword ^= input->dword_stream[i];
1595 	hi_hash_dword = ntohl(hi_dword);
1596 
1597 	/* low dword is word swapped version of common */
1598 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1599 
1600 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1601 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1602 
1603 	/* Process bits 0 and 16 */
1604 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1605 
1606 	/*
1607 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1608 	 * delay this because bit 0 of the stream should not be processed
1609 	 * so we do not add the vlan until after bit 0 was processed
1610 	 */
1611 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1612 
	/* Process the remaining 30 bits of the key */
1614 	for (i = 1; i <= 15; i++)
1615 		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1616 
1617 	/*
1618 	 * Limit hash to 13 bits since max bucket count is 8K.
1619 	 * Store result at the end of the input stream.
1620 	 */
1621 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1622 }
1623 
1624 /**
 *  ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
 *  @input_mask: mask to be bit swapped
 *
 *  The source and destination port masks for flow director are bit swapped
 *  in that bit 15 affects bit 0, 14 affects bit 1, 13 affects bit 2, etc.  In
 *  order to generate a correctly swapped value we need to bit swap the mask
 *  and that is what is accomplished by this function.
1632  **/
1633 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1634 {
1635 	u32 mask = ntohs(input_mask->formatted.dst_port);
1636 
1637 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1638 	mask |= ntohs(input_mask->formatted.src_port);
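	/* bit swap each 16-bit mask in place: bits, then pairs, nibbles, bytes */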
1639 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1640 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1641 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1642 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1643 }
1644 
/*
 * These macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result, on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
1652 #define IXGBE_STORE_AS_BE32(_value) \
1653 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1654 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1655 
1656 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1657 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
1658 
1659 #define IXGBE_STORE_AS_BE16(_value) \
1660 	ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1661 
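/**
 *  ixgbe_fdir_set_input_mask_82599 - Program the Flow Director input masks
 *  @hw: pointer to hardware structure
 *  @input_mask: mask to program into the Flow Director mask registers
 *
 *  Programs the FDIRM, FDIRTCPM, FDIRUDPM, FDIRSIP4M and FDIRDIP4M
 *  registers based on the supplied input mask.
 **/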
1662 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1663 				    union ixgbe_atr_input *input_mask)
1664 {
1665 	/* mask IPv6 since it is currently not supported */
1666 	u32 fdirm = IXGBE_FDIRM_DIPv6;
1667 	u32 fdirtcpm;
1668 
1669 	/*
1670 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1671 	 * are zero, then assume a full mask for that field.  Also assume that
1672 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1673 	 * cannot be masked out in this implementation.
1674 	 *
1675 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1676 	 * point in time.
1677 	 */
1678 
1679 	/* verify bucket hash is cleared on hash generation */
1680 	if (input_mask->formatted.bkt_hash)
1681 		hw_dbg(hw, " bucket hash should always be 0 in mask\n");
1682 
1683 	/* Program FDIRM and verify partial masks */
1684 	switch (input_mask->formatted.vm_pool & 0x7F) {
1685 	case 0x0:
1686 		fdirm |= IXGBE_FDIRM_POOL;
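		/* fall through */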
1687 	case 0x7F:
1688 		break;
1689 	default:
1690 		hw_dbg(hw, " Error on vm pool mask\n");
1691 		return IXGBE_ERR_CONFIG;
1692 	}
1693 
1694 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1695 	case 0x0:
1696 		fdirm |= IXGBE_FDIRM_L4P;
1697 		if (input_mask->formatted.dst_port ||
1698 		    input_mask->formatted.src_port) {
1699 			hw_dbg(hw, " Error on src/dst port mask\n");
1700 			return IXGBE_ERR_CONFIG;
1701 		}
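		/* fall through */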
1702 	case IXGBE_ATR_L4TYPE_MASK:
1703 		break;
1704 	default:
1705 		hw_dbg(hw, " Error on flow type mask\n");
1706 		return IXGBE_ERR_CONFIG;
1707 	}
1708 
1709 	switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
1710 	case 0x0000:
1711 		/* mask VLAN ID, fall through to mask VLAN priority */
1712 		fdirm |= IXGBE_FDIRM_VLANID;
1713 	case 0x0FFF:
1714 		/* mask VLAN priority */
1715 		fdirm |= IXGBE_FDIRM_VLANP;
1716 		break;
1717 	case 0xE000:
1718 		/* mask VLAN ID only, fall through */
1719 		fdirm |= IXGBE_FDIRM_VLANID;
1720 	case 0xEFFF:
1721 		/* no VLAN fields masked */
1722 		break;
1723 	default:
1724 		hw_dbg(hw, " Error on VLAN mask\n");
1725 		return IXGBE_ERR_CONFIG;
1726 	}
1727 
1728 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1729 	case 0x0000:
1730 		/* Mask Flex Bytes, fall through */
1731 		fdirm |= IXGBE_FDIRM_FLEX;
1732 	case 0xFFFF:
1733 		break;
1734 	default:
1735 		hw_dbg(hw, " Error on flexible byte mask\n");
1736 		return IXGBE_ERR_CONFIG;
1737 	}
1738 
1739 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1740 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1741 
1742 	/* store the TCP/UDP port masks, bit reversed from port layout */
1743 	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1744 
1745 	/* write both the same so that UDP and TCP use the same mask */
1746 	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1747 	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1748 
	/* store source and destination IP masks (big-endian) */
1750 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1751 			     ~input_mask->formatted.src_ip[0]);
1752 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1753 			     ~input_mask->formatted.dst_ip[0]);
1754 
1755 	return 0;
1756 }
1757 
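/**
 *  ixgbe_fdir_write_perfect_filter_82599 - Write a perfect filter to hardware
 *  @hw: pointer to hardware structure
 *  @input: flow to add to the filter table
 *  @soft_id: software index used to identify the filter
 *  @queue: Rx queue to direct matching packets to
 *
 *  Programs the filter registers and then issues the FDIRCMD add-flow
 *  command to commit the filter.
 **/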
1758 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1759 					  union ixgbe_atr_input *input,
1760 					  u16 soft_id, u8 queue)
1761 {
1762 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
1763 
1764 	/* currently IPv6 is not supported, must be programmed with 0 */
1765 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1766 			     input->formatted.src_ip[0]);
1767 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1768 			     input->formatted.src_ip[1]);
1769 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1770 			     input->formatted.src_ip[2]);
1771 
1772 	/* record the source address (big-endian) */
1773 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1774 
1775 	/* record the first 32 bits of the destination address (big-endian) */
1776 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1777 
	/* record source and destination port (little-endian) */
1779 	fdirport = ntohs(input->formatted.dst_port);
1780 	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1781 	fdirport |= ntohs(input->formatted.src_port);
1782 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1783 
	/* record vlan (little-endian) and flex_bytes (big-endian) */
1785 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1786 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1787 	fdirvlan |= ntohs(input->formatted.vlan_id);
1788 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1789 
1790 	/* configure FDIRHASH register */
1791 	fdirhash = input->formatted.bkt_hash;
1792 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1793 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1794 
1795 	/*
1796 	 * flush all previous writes to make certain registers are
1797 	 * programmed prior to issuing the command
1798 	 */
1799 	IXGBE_WRITE_FLUSH(hw);
1800 
1801 	/* configure FDIRCMD register */
1802 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1803 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1804 	if (queue == IXGBE_FDIR_DROP_QUEUE)
1805 		fdircmd |= IXGBE_FDIRCMD_DROP;
1806 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1807 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1808 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1809 
1810 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1811 
1812 	return 0;
1813 }
1814 
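/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter from hardware
 *  @hw: pointer to hardware structure
 *  @input: flow to remove from the filter table
 *  @soft_id: software index of the filter to remove
 *
 *  Queries the hardware for the filter and, if it is present, issues the
 *  FDIRCMD remove-flow command.
 **/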
1815 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1816 					  union ixgbe_atr_input *input,
1817 					  u16 soft_id)
1818 {
1819 	u32 fdirhash;
1820 	u32 fdircmd = 0;
1821 	u32 retry_count;
1822 	s32 err = 0;
1823 
1824 	/* configure FDIRHASH register */
1825 	fdirhash = input->formatted.bkt_hash;
1826 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1827 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1828 
1829 	/* flush hash to HW */
1830 	IXGBE_WRITE_FLUSH(hw);
1831 
1832 	/* Query if filter is present */
1833 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1834 
1835 	for (retry_count = 10; retry_count; retry_count--) {
1836 		/* allow 10us for query to process */
1837 		udelay(10);
1838 		/* verify query completed successfully */
1839 		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1840 		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1841 			break;
1842 	}
1843 
1844 	if (!retry_count)
1845 		err = IXGBE_ERR_FDIR_REINIT_FAILED;
1846 
1847 	/* if filter exists in hardware then remove it */
1848 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1849 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1850 		IXGBE_WRITE_FLUSH(hw);
1851 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1852 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1853 	}
1854 
1855 	return err;
1856 }
1857 
1858 /**
1859  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1860  *  @hw: pointer to hardware structure
1861  *  @reg: analog register to read
1862  *  @val: read value
1863  *
1864  *  Performs read operation to Omer analog register specified.
1865  **/
1866 static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1867 {
1868 	u32  core_ctl;
1869 
1870 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1871 			(reg << 8));
1872 	IXGBE_WRITE_FLUSH(hw);
1873 	udelay(10);
1874 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1875 	*val = (u8)core_ctl;
1876 
1877 	return 0;
1878 }
1879 
1880 /**
1881  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
1882  *  @hw: pointer to hardware structure
 *  @reg: analog register to write
1884  *  @val: value to write
1885  *
1886  *  Performs write operation to Omer analog register specified.
1887  **/
1888 static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1889 {
1890 	u32  core_ctl;
1891 
1892 	core_ctl = (reg << 8) | val;
1893 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1894 	IXGBE_WRITE_FLUSH(hw);
1895 	udelay(10);
1896 
1897 	return 0;
1898 }
1899 
1900 /**
1901  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1902  *  @hw: pointer to hardware structure
1903  *
 *  Starts the hardware using the generic start_hw function and the
 *  generation 2 start_hw function, then performs any revision-specific
 *  operations.
1907  **/
1908 static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1909 {
1910 	s32 ret_val = 0;
1911 
1912 	ret_val = ixgbe_start_hw_generic(hw);
1913 	if (ret_val)
1914 		return ret_val;
1915 
1916 	ret_val = ixgbe_start_hw_gen2(hw);
1917 	if (ret_val)
1918 		return ret_val;
1919 
1920 	/* We need to run link autotry after the driver loads */
1921 	hw->mac.autotry_restart = true;
1922 
1926 	return ixgbe_verify_fw_version_82599(hw);
1927 }
1928 
1929 /**
1930  *  ixgbe_identify_phy_82599 - Get physical layer module
1931  *  @hw: pointer to hardware structure
1932  *
1933  *  Determines the physical layer module found on the current adapter.
1934  *  If PHY already detected, maintains current PHY type in hw struct,
1935  *  otherwise executes the PHY detection routine.
1936  **/
1937 static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1938 {
1939 	s32 status;
1940 
1941 	/* Detect PHY if not unknown - returns success if already detected. */
1942 	status = ixgbe_identify_phy_generic(hw);
1943 	if (status) {
1944 		/* 82599 10GBASE-T requires an external PHY */
1945 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1946 			return status;
1947 		status = ixgbe_identify_module_generic(hw);
1948 	}
1949 
1950 	/* Set PHY type none if no PHY detected */
1951 	if (hw->phy.type == ixgbe_phy_unknown) {
1952 		hw->phy.type = ixgbe_phy_none;
1953 		status = 0;
1954 	}
1955 
1956 	/* Return error if SFP module has been detected but is not supported */
1957 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1958 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
1959 
1960 	return status;
1961 }
1962 
1963 /**
1964  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
1965  *  @hw: pointer to hardware structure
1966  *  @regval: register value to write to RXCTRL
1967  *
1968  *  Enables the Rx DMA unit for 82599
1969  **/
1970 static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1971 {
1972 	/*
1973 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
1974 	 * If traffic is incoming before we enable the Rx unit, it could hang
1975 	 * the Rx DMA unit.  Therefore, make sure the security engine is
1976 	 * completely disabled prior to enabling the Rx unit.
1977 	 */
1978 	hw->mac.ops.disable_rx_buff(hw);
1979 
1980 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
1981 
1982 	hw->mac.ops.enable_rx_buff(hw);
1983 
1984 	return 0;
1985 }
1986 
1987 /**
1988  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
1989  *  @hw: pointer to hardware structure
1990  *
 *  Verifies that the installed firmware version is 0.6 or higher
1992  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
1993  *
1994  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
1995  *  if the FW version is not supported.
1996  **/
1997 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
1998 {
1999 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2000 	u16 fw_offset, fw_ptp_cfg_offset;
2001 	u16 offset;
2002 	u16 fw_version = 0;
2003 
2004 	/* firmware check is only necessary for SFI devices */
2005 	if (hw->phy.media_type != ixgbe_media_type_fiber)
2006 		return 0;
2007 
2008 	/* get the offset to the Firmware Module block */
2009 	offset = IXGBE_FW_PTR;
2010 	if (hw->eeprom.ops.read(hw, offset, &fw_offset))
2011 		goto fw_version_err;
2012 
2013 	if (fw_offset == 0 || fw_offset == 0xFFFF)
2014 		return IXGBE_ERR_EEPROM_VERSION;
2015 
2016 	/* get the offset to the Pass Through Patch Configuration block */
2017 	offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
2018 	if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
2019 		goto fw_version_err;
2020 
2021 	if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
2022 		return IXGBE_ERR_EEPROM_VERSION;
2023 
2024 	/* get the firmware version */
2025 	offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
2026 	if (hw->eeprom.ops.read(hw, offset, &fw_version))
2027 		goto fw_version_err;
2028 
2029 	if (fw_version > 0x5)
2030 		status = 0;
2031 
2032 	return status;
2033 
2034 fw_version_err:
2035 	hw_err(hw, "eeprom read at offset %d failed\n", offset);
2036 	return IXGBE_ERR_EEPROM_VERSION;
2037 }
2038 
2039 /**
2040  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2041  *  @hw: pointer to hardware structure
2042  *
2043  *  Returns true if the LESM FW module is present and enabled. Otherwise
2044  *  returns false. Smart Speed must be disabled if LESM FW module is enabled.
2045  **/
2046 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2047 {
2048 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2049 	s32 status;
2050 
2051 	/* get the offset to the Firmware Module block */
2052 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2053 
2054 	if (status || fw_offset == 0 || fw_offset == 0xFFFF)
2055 		return false;
2056 
2057 	/* get the offset to the LESM Parameters block */
2058 	status = hw->eeprom.ops.read(hw, (fw_offset +
2059 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2060 				     &fw_lesm_param_offset);
2061 
2062 	if (status ||
2063 	    fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF)
2064 		return false;
2065 
2066 	/* get the lesm state word */
2067 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2068 				     IXGBE_FW_LESM_STATE_1),
2069 				     &fw_lesm_state);
2070 
2071 	if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2072 		return true;
2073 
2074 	return false;
2075 }
2076 
2077 /**
2078  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2079  *  fastest available method
2080  *
2081  *  @hw: pointer to hardware structure
 *  @offset: offset of word in EEPROM to read
2083  *  @words: number of words
2084  *  @data: word(s) read from the EEPROM
2085  *
2086  *  Retrieves 16 bit word(s) read from EEPROM
2087  **/
2088 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2089 					  u16 words, u16 *data)
2090 {
2091 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2092 
2093 	/* If EEPROM is detected and can be addressed using 14 bits,
2094 	 * use EERD otherwise use bit bang
2095 	 */
2096 	if (eeprom->type == ixgbe_eeprom_spi &&
2097 	    offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
2098 		return ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
2099 
2100 	return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
2101 							 data);
2102 }
2103 
2104 /**
2105  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
2106  *  fastest available method
2107  *
2108  *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
2110  *  @data: word read from the EEPROM
2111  *
2112  *  Reads a 16 bit word from the EEPROM
2113  **/
2114 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2115 				   u16 offset, u16 *data)
2116 {
2117 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2118 
2119 	/*
2120 	 * If EEPROM is detected and can be addressed using 14 bits,
2121 	 * use EERD otherwise use bit bang
2122 	 */
2123 	if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR)
2124 		return ixgbe_read_eerd_generic(hw, offset, data);
2125 
2126 	return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2127 }
2128 
2129 /**
2130  * ixgbe_reset_pipeline_82599 - perform pipeline reset
2131  *
2132  * @hw: pointer to hardware structure
2133  *
2134  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2135  * full pipeline reset.  Note - We must hold the SW/FW semaphore before writing
2136  * to AUTOC, so this function assumes the semaphore is held.
2137  **/
2138 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2139 {
2140 	s32 ret_val;
2141 	u32 anlp1_reg = 0;
2142 	u32 i, autoc_reg, autoc2_reg;
2143 
2144 	/* Enable link if disabled in NVM */
2145 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2146 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2147 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2148 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2149 		IXGBE_WRITE_FLUSH(hw);
2150 	}
2151 
2152 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2153 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2154 
2155 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2156 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2157 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2158 
2159 	/* Wait for AN to leave state 0 */
2160 	for (i = 0; i < 10; i++) {
2161 		usleep_range(4000, 8000);
2162 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2163 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2164 			break;
2165 	}
2166 
2167 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2168 		hw_dbg(hw, "auto negotiation not completed\n");
2169 		ret_val = IXGBE_ERR_RESET_FAILED;
2170 		goto reset_pipeline_out;
2171 	}
2172 
2173 	ret_val = 0;
2174 
2175 reset_pipeline_out:
2176 	/* Write AUTOC register with original LMS field and Restart_AN */
2177 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2178 	IXGBE_WRITE_FLUSH(hw);
2179 
2180 	return ret_val;
2181 }
2182 
2183 /**
2184  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2185  *  @hw: pointer to hardware structure
2186  *  @byte_offset: byte offset to read
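 *  @dev_addr: device address to read from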
2187  *  @data: value read
2188  *
2189  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
2190  *  a specified device address.
2191  **/
2192 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2193 				     u8 dev_addr, u8 *data)
2194 {
2195 	u32 esdp;
2196 	s32 status;
2197 	s32 timeout = 200;
2198 
	if (hw->phy.qsfp_shared_i2c_bus) {
2200 		/* Acquire I2C bus ownership. */
2201 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2202 		esdp |= IXGBE_ESDP_SDP0;
2203 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2204 		IXGBE_WRITE_FLUSH(hw);
2205 
2206 		while (timeout) {
2207 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2208 			if (esdp & IXGBE_ESDP_SDP1)
2209 				break;
2210 
2211 			usleep_range(5000, 10000);
2212 			timeout--;
2213 		}
2214 
2215 		if (!timeout) {
2216 			hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
2217 			status = IXGBE_ERR_I2C;
2218 			goto release_i2c_access;
2219 		}
2220 	}
2221 
2222 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2223 
2224 release_i2c_access:
	if (hw->phy.qsfp_shared_i2c_bus) {
2226 		/* Release I2C bus ownership. */
2227 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2228 		esdp &= ~IXGBE_ESDP_SDP0;
2229 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2230 		IXGBE_WRITE_FLUSH(hw);
2231 	}
2232 
2233 	return status;
2234 }
2235 
2236 /**
2237  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2238  *  @hw: pointer to hardware structure
2239  *  @byte_offset: byte offset to write
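 *  @dev_addr: device address to write to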
2240  *  @data: value to write
2241  *
2242  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
2243  *  a specified device address.
2244  **/
2245 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2246 				      u8 dev_addr, u8 data)
2247 {
2248 	u32 esdp;
2249 	s32 status;
2250 	s32 timeout = 200;
2251 
	if (hw->phy.qsfp_shared_i2c_bus) {
2253 		/* Acquire I2C bus ownership. */
2254 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2255 		esdp |= IXGBE_ESDP_SDP0;
2256 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2257 		IXGBE_WRITE_FLUSH(hw);
2258 
2259 		while (timeout) {
2260 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2261 			if (esdp & IXGBE_ESDP_SDP1)
2262 				break;
2263 
2264 			usleep_range(5000, 10000);
2265 			timeout--;
2266 		}
2267 
2268 		if (!timeout) {
2269 			hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
2270 			status = IXGBE_ERR_I2C;
2271 			goto release_i2c_access;
2272 		}
2273 	}
2274 
2275 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2276 
2277 release_i2c_access:
	if (hw->phy.qsfp_shared_i2c_bus) {
2279 		/* Release I2C bus ownership. */
2280 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2281 		esdp &= ~IXGBE_ESDP_SDP0;
2282 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2283 		IXGBE_WRITE_FLUSH(hw);
2284 	}
2285 
2286 	return status;
2287 }
2288 
2289 static struct ixgbe_mac_operations mac_ops_82599 = {
2290 	.init_hw                = &ixgbe_init_hw_generic,
2291 	.reset_hw               = &ixgbe_reset_hw_82599,
2292 	.start_hw               = &ixgbe_start_hw_82599,
2293 	.clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
2294 	.get_media_type         = &ixgbe_get_media_type_82599,
2295 	.enable_rx_dma          = &ixgbe_enable_rx_dma_82599,
2296 	.disable_rx_buff	= &ixgbe_disable_rx_buff_generic,
2297 	.enable_rx_buff		= &ixgbe_enable_rx_buff_generic,
2298 	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
2299 	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
2300 	.get_device_caps        = &ixgbe_get_device_caps_generic,
2301 	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
2302 	.stop_adapter           = &ixgbe_stop_adapter_generic,
2303 	.get_bus_info           = &ixgbe_get_bus_info_generic,
2304 	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
2305 	.read_analog_reg8       = &ixgbe_read_analog_reg8_82599,
2306 	.write_analog_reg8      = &ixgbe_write_analog_reg8_82599,
2307 	.stop_link_on_d3	= &ixgbe_stop_mac_link_on_d3_82599,
2308 	.setup_link             = &ixgbe_setup_mac_link_82599,
2309 	.set_rxpba		= &ixgbe_set_rxpba_generic,
2310 	.check_link             = &ixgbe_check_mac_link_generic,
2311 	.get_link_capabilities  = &ixgbe_get_link_capabilities_82599,
2312 	.led_on                 = &ixgbe_led_on_generic,
2313 	.led_off                = &ixgbe_led_off_generic,
2314 	.blink_led_start        = &ixgbe_blink_led_start_generic,
2315 	.blink_led_stop         = &ixgbe_blink_led_stop_generic,
2316 	.set_rar                = &ixgbe_set_rar_generic,
2317 	.clear_rar              = &ixgbe_clear_rar_generic,
2318 	.set_vmdq               = &ixgbe_set_vmdq_generic,
2319 	.set_vmdq_san_mac	= &ixgbe_set_vmdq_san_mac_generic,
2320 	.clear_vmdq             = &ixgbe_clear_vmdq_generic,
2321 	.init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
2322 	.update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
2323 	.enable_mc              = &ixgbe_enable_mc_generic,
2324 	.disable_mc             = &ixgbe_disable_mc_generic,
2325 	.clear_vfta             = &ixgbe_clear_vfta_generic,
2326 	.set_vfta               = &ixgbe_set_vfta_generic,
2327 	.fc_enable              = &ixgbe_fc_enable_generic,
2328 	.set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic,
2329 	.init_uta_tables        = &ixgbe_init_uta_tables_generic,
2330 	.setup_sfp              = &ixgbe_setup_sfp_modules_82599,
2331 	.set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
2332 	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2333 	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
2334 	.release_swfw_sync      = &ixgbe_release_swfw_sync,
2335 	.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2336 	.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2337 	.prot_autoc_read	= &prot_autoc_read_82599,
2338 	.prot_autoc_write	= &prot_autoc_write_82599,
2339 };
2340 
2341 static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2342 	.init_params		= &ixgbe_init_eeprom_params_generic,
2343 	.read			= &ixgbe_read_eeprom_82599,
2344 	.read_buffer		= &ixgbe_read_eeprom_buffer_82599,
2345 	.write			= &ixgbe_write_eeprom_generic,
2346 	.write_buffer		= &ixgbe_write_eeprom_buffer_bit_bang_generic,
2347 	.calc_checksum		= &ixgbe_calc_eeprom_checksum_generic,
2348 	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic,
2349 	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
2350 };
2351 
2352 static struct ixgbe_phy_operations phy_ops_82599 = {
2353 	.identify		= &ixgbe_identify_phy_82599,
2354 	.identify_sfp		= &ixgbe_identify_module_generic,
2355 	.init			= &ixgbe_init_phy_ops_82599,
2356 	.reset			= &ixgbe_reset_phy_generic,
2357 	.read_reg		= &ixgbe_read_phy_reg_generic,
2358 	.write_reg		= &ixgbe_write_phy_reg_generic,
2359 	.setup_link		= &ixgbe_setup_phy_link_generic,
2360 	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
2361 	.read_i2c_byte		= &ixgbe_read_i2c_byte_generic,
2362 	.write_i2c_byte		= &ixgbe_write_i2c_byte_generic,
2363 	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_generic,
2364 	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_generic,
2365 	.write_i2c_eeprom	= &ixgbe_write_i2c_eeprom_generic,
2366 	.check_overtemp		= &ixgbe_tn_check_overtemp,
2367 };
2368 
2369 struct ixgbe_info ixgbe_82599_info = {
2370 	.mac                    = ixgbe_mac_82599EB,
2371 	.get_invariants         = &ixgbe_get_invariants_82599,
2372 	.mac_ops                = &mac_ops_82599,
2373 	.eeprom_ops             = &eeprom_ops_82599,
2374 	.phy_ops                = &phy_ops_82599,
2375 	.mbx_ops                = &mbx_ops_generic,
2376 };
2377