1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
3 
4 /* e1000_82575
5  * e1000_82576
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/types.h>
11 #include <linux/if_ether.h>
12 #include <linux/i2c.h>
13 
14 #include "e1000_mac.h"
15 #include "e1000_82575.h"
16 #include "e1000_i210.h"
17 #include "igb.h"
18 
19 static s32  igb_get_invariants_82575(struct e1000_hw *);
20 static s32  igb_acquire_phy_82575(struct e1000_hw *);
21 static void igb_release_phy_82575(struct e1000_hw *);
22 static s32  igb_acquire_nvm_82575(struct e1000_hw *);
23 static void igb_release_nvm_82575(struct e1000_hw *);
24 static s32  igb_check_for_link_82575(struct e1000_hw *);
25 static s32  igb_get_cfg_done_82575(struct e1000_hw *);
26 static s32  igb_init_hw_82575(struct e1000_hw *);
27 static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
28 static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
29 static s32  igb_reset_hw_82575(struct e1000_hw *);
30 static s32  igb_reset_hw_82580(struct e1000_hw *);
31 static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
32 static s32  igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
33 static s32  igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
34 static s32  igb_setup_copper_link_82575(struct e1000_hw *);
35 static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
36 static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
37 static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
38 static s32  igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
39 static s32  igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
40 						 u16 *);
41 static s32  igb_get_phy_id_82575(struct e1000_hw *);
42 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
43 static bool igb_sgmii_active_82575(struct e1000_hw *);
44 static s32  igb_reset_init_script_82575(struct e1000_hw *);
45 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
46 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
47 static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
48 static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
49 static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
50 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
51 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
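/* Rx packet buffer size (in KB) for each RXPBS size-field encoding on 82580 */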
52 static const u16 e1000_82580_rxpbs_table[] = {
53 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
54 
/* Due to a hw errata, if the host tries to configure the VFTA register
56  * while performing queries from the BMC or DMA, then the VFTA in some
57  * cases won't be written.
58  */
59 
60 /**
61  *  igb_write_vfta_i350 - Write value to VLAN filter table
62  *  @hw: pointer to the HW structure
63  *  @offset: register offset in VLAN filter table
64  *  @value: register value written to VLAN filter table
65  *
66  *  Writes value at the given offset in the register array which stores
67  *  the VLAN filter table.
68  **/
69 static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
70 {
71 	struct igb_adapter *adapter = hw->back;
72 	int i;
73 
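	/* Repeat the write several times to work around the BMC/DMA
	 * write-drop errata described above.
	 */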
74 	for (i = 10; i--;)
75 		array_wr32(E1000_VFTA, offset, value);
76 
77 	wrfl();
78 	adapter->shadow_vfta[offset] = value;
79 }
80 
81 /**
82  *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
83  *  @hw: pointer to the HW structure
84  *
85  *  Called to determine if the I2C pins are being used for I2C or as an
86  *  external MDIO interface since the two options are mutually exclusive.
87  **/
88 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
89 {
90 	u32 reg = 0;
91 	bool ext_mdio = false;
92 
93 	switch (hw->mac.type) {
94 	case e1000_82575:
95 	case e1000_82576:
96 		reg = rd32(E1000_MDIC);
97 		ext_mdio = !!(reg & E1000_MDIC_DEST);
98 		break;
99 	case e1000_82580:
100 	case e1000_i350:
101 	case e1000_i354:
102 	case e1000_i210:
103 	case e1000_i211:
104 		reg = rd32(E1000_MDICNFG);
105 		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
106 		break;
107 	default:
108 		break;
109 	}
110 	return ext_mdio;
111 }
112 
113 /**
114  *  igb_check_for_link_media_swap - Check which M88E1112 interface linked
115  *  @hw: pointer to the HW structure
116  *
117  *  Poll the M88E1112 interfaces to see which interface achieved link.
 **/
119 static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
120 {
121 	struct e1000_phy_info *phy = &hw->phy;
122 	s32 ret_val;
123 	u16 data;
124 	u8 port = 0;
125 
126 	/* Check the copper medium. */
127 	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
128 	if (ret_val)
129 		return ret_val;
130 
131 	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
132 	if (ret_val)
133 		return ret_val;
134 
135 	if (data & E1000_M88E1112_STATUS_LINK)
136 		port = E1000_MEDIA_PORT_COPPER;
137 
138 	/* Check the other medium. */
139 	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
140 	if (ret_val)
141 		return ret_val;
142 
143 	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
144 	if (ret_val)
		return ret_val;

	if (data & E1000_M88E1112_STATUS_LINK)
149 		port = E1000_MEDIA_PORT_OTHER;
150 
151 	/* Determine if a swap needs to happen. */
152 	if (port && (hw->dev_spec._82575.media_port != port)) {
153 		hw->dev_spec._82575.media_port = port;
154 		hw->dev_spec._82575.media_changed = true;
155 	}
156 
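	/* In both branches the PHY page select ends up back at 0; for the
	 * copper port restore it before the link check so the PHY reads in
	 * igb_check_for_link_82575() use the default (copper) page.
	 */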
157 	if (port == E1000_MEDIA_PORT_COPPER) {
158 		/* reset page to 0 */
159 		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
160 		if (ret_val)
161 			return ret_val;
162 		igb_check_for_link_82575(hw);
163 	} else {
164 		igb_check_for_link_82575(hw);
165 		/* reset page to 0 */
166 		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
167 		if (ret_val)
168 			return ret_val;
169 	}
170 
171 	return 0;
172 }
173 
174 /**
175  *  igb_init_phy_params_82575 - Init PHY func ptrs.
176  *  @hw: pointer to the HW structure
177  **/
178 static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
179 {
180 	struct e1000_phy_info *phy = &hw->phy;
181 	s32 ret_val = 0;
182 	u32 ctrl_ext;
183 
184 	if (hw->phy.media_type != e1000_media_type_copper) {
185 		phy->type = e1000_phy_none;
186 		goto out;
187 	}
188 
189 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
190 	phy->reset_delay_us	= 100;
191 
192 	ctrl_ext = rd32(E1000_CTRL_EXT);
193 
194 	if (igb_sgmii_active_82575(hw)) {
195 		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
196 		ctrl_ext |= E1000_CTRL_I2C_ENA;
197 	} else {
198 		phy->ops.reset = igb_phy_hw_reset;
199 		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
200 	}
201 
202 	wr32(E1000_CTRL_EXT, ctrl_ext);
203 	igb_reset_mdicnfg_82580(hw);
204 
205 	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
206 		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
207 		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
208 	} else {
209 		switch (hw->mac.type) {
210 		case e1000_82580:
211 		case e1000_i350:
212 		case e1000_i354:
213 		case e1000_i210:
214 		case e1000_i211:
215 			phy->ops.read_reg = igb_read_phy_reg_82580;
216 			phy->ops.write_reg = igb_write_phy_reg_82580;
217 			break;
218 		default:
219 			phy->ops.read_reg = igb_read_phy_reg_igp;
220 			phy->ops.write_reg = igb_write_phy_reg_igp;
221 		}
222 	}
223 
224 	/* set lan id */
225 	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
226 			E1000_STATUS_FUNC_SHIFT;
227 
228 	/* Make sure the PHY is in a good state. Several people have reported
229 	 * firmware leaving the PHY's page select register set to something
230 	 * other than the default of zero, which causes the PHY ID read to
231 	 * access something other than the intended register.
232 	 */
233 	ret_val = hw->phy.ops.reset(hw);
234 	if (ret_val) {
235 		hw_dbg("Error resetting the PHY.\n");
236 		goto out;
237 	}
238 
239 	/* Set phy->phy_addr and phy->id. */
240 	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
241 	ret_val = igb_get_phy_id_82575(hw);
242 	if (ret_val)
243 		return ret_val;
244 
245 	/* Verify phy id and set remaining function pointers */
246 	switch (phy->id) {
247 	case M88E1543_E_PHY_ID:
248 	case M88E1512_E_PHY_ID:
249 	case I347AT4_E_PHY_ID:
250 	case M88E1112_E_PHY_ID:
251 	case M88E1111_I_PHY_ID:
252 		phy->type		= e1000_phy_m88;
253 		phy->ops.check_polarity	= igb_check_polarity_m88;
254 		phy->ops.get_phy_info	= igb_get_phy_info_m88;
255 		if (phy->id != M88E1111_I_PHY_ID)
256 			phy->ops.get_cable_length =
257 					 igb_get_cable_length_m88_gen2;
258 		else
259 			phy->ops.get_cable_length = igb_get_cable_length_m88;
260 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
261 		/* Check if this PHY is configured for media swap. */
262 		if (phy->id == M88E1112_E_PHY_ID) {
263 			u16 data;
264 
265 			ret_val = phy->ops.write_reg(hw,
266 						     E1000_M88E1112_PAGE_ADDR,
267 						     2);
268 			if (ret_val)
269 				goto out;
270 
271 			ret_val = phy->ops.read_reg(hw,
272 						    E1000_M88E1112_MAC_CTRL_1,
273 						    &data);
274 			if (ret_val)
275 				goto out;
276 
277 			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
278 			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
279 			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
280 			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
281 				hw->mac.ops.check_for_link =
282 						igb_check_for_link_media_swap;
283 		}
284 		if (phy->id == M88E1512_E_PHY_ID) {
285 			ret_val = igb_initialize_M88E1512_phy(hw);
286 			if (ret_val)
287 				goto out;
288 		}
289 		if (phy->id == M88E1543_E_PHY_ID) {
290 			ret_val = igb_initialize_M88E1543_phy(hw);
291 			if (ret_val)
292 				goto out;
293 		}
294 		break;
295 	case IGP03E1000_E_PHY_ID:
296 		phy->type = e1000_phy_igp_3;
297 		phy->ops.get_phy_info = igb_get_phy_info_igp;
298 		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
299 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
300 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
301 		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
302 		break;
303 	case I82580_I_PHY_ID:
304 	case I350_I_PHY_ID:
305 		phy->type = e1000_phy_82580;
306 		phy->ops.force_speed_duplex =
307 					 igb_phy_force_speed_duplex_82580;
308 		phy->ops.get_cable_length = igb_get_cable_length_82580;
309 		phy->ops.get_phy_info = igb_get_phy_info_82580;
310 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
311 		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
312 		break;
313 	case I210_I_PHY_ID:
314 		phy->type		= e1000_phy_i210;
315 		phy->ops.check_polarity	= igb_check_polarity_m88;
316 		phy->ops.get_cfg_done	= igb_get_cfg_done_i210;
317 		phy->ops.get_phy_info	= igb_get_phy_info_m88;
318 		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
319 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
320 		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
321 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
322 		break;
323 	case BCM54616_E_PHY_ID:
324 		phy->type = e1000_phy_bcm54616;
325 		break;
326 	default:
327 		ret_val = -E1000_ERR_PHY;
328 		goto out;
329 	}
330 
331 out:
332 	return ret_val;
333 }
334 
335 /**
336  *  igb_init_nvm_params_82575 - Init NVM func ptrs.
337  *  @hw: pointer to the HW structure
338  **/
339 static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
340 {
341 	struct e1000_nvm_info *nvm = &hw->nvm;
342 	u32 eecd = rd32(E1000_EECD);
343 	u16 size;
344 
345 	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
346 		     E1000_EECD_SIZE_EX_SHIFT);
347 
	/* Adding the base shift constant turns "size" into the left-shift
	 * value used to compute word_size.
	 */
351 	size += NVM_WORD_SIZE_BASE_SHIFT;
352 
353 	/* Just in case size is out of range, cap it to the largest
354 	 * EEPROM size supported
355 	 */
356 	if (size > 15)
357 		size = 15;
358 
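	/* Example: with the usual NVM_WORD_SIZE_BASE_SHIFT of 6, an EECD size
	 * field of 1 gives size = 7, i.e. word_size = BIT(7) = 128 words.
	 */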
359 	nvm->word_size = BIT(size);
360 	nvm->opcode_bits = 8;
361 	nvm->delay_usec = 1;
362 
363 	switch (nvm->override) {
364 	case e1000_nvm_override_spi_large:
365 		nvm->page_size = 32;
366 		nvm->address_bits = 16;
367 		break;
368 	case e1000_nvm_override_spi_small:
369 		nvm->page_size = 8;
370 		nvm->address_bits = 8;
371 		break;
372 	default:
373 		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
374 		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
375 				    16 : 8;
376 		break;
377 	}
378 	if (nvm->word_size == BIT(15))
379 		nvm->page_size = 128;
380 
381 	nvm->type = e1000_nvm_eeprom_spi;
382 
383 	/* NVM Function Pointers */
384 	nvm->ops.acquire = igb_acquire_nvm_82575;
385 	nvm->ops.release = igb_release_nvm_82575;
386 	nvm->ops.write = igb_write_nvm_spi;
387 	nvm->ops.validate = igb_validate_nvm_checksum;
388 	nvm->ops.update = igb_update_nvm_checksum;
389 	if (nvm->word_size < BIT(15))
390 		nvm->ops.read = igb_read_nvm_eerd;
391 	else
392 		nvm->ops.read = igb_read_nvm_spi;
393 
394 	/* override generic family function pointers for specific descendants */
395 	switch (hw->mac.type) {
396 	case e1000_82580:
397 		nvm->ops.validate = igb_validate_nvm_checksum_82580;
398 		nvm->ops.update = igb_update_nvm_checksum_82580;
399 		break;
400 	case e1000_i354:
401 	case e1000_i350:
402 		nvm->ops.validate = igb_validate_nvm_checksum_i350;
403 		nvm->ops.update = igb_update_nvm_checksum_i350;
404 		break;
405 	default:
406 		break;
407 	}
408 
409 	return 0;
410 }
411 
412 /**
413  *  igb_init_mac_params_82575 - Init MAC func ptrs.
414  *  @hw: pointer to the HW structure
415  **/
416 static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
417 {
418 	struct e1000_mac_info *mac = &hw->mac;
419 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
420 
421 	/* Set mta register count */
422 	mac->mta_reg_count = 128;
423 	/* Set uta register count */
424 	mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
425 	/* Set rar entry count */
426 	switch (mac->type) {
427 	case e1000_82576:
428 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
429 		break;
430 	case e1000_82580:
431 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
432 		break;
433 	case e1000_i350:
434 	case e1000_i354:
435 		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
436 		break;
437 	default:
438 		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
439 		break;
440 	}
441 	/* reset */
442 	if (mac->type >= e1000_82580)
443 		mac->ops.reset_hw = igb_reset_hw_82580;
444 	else
445 		mac->ops.reset_hw = igb_reset_hw_82575;
446 
447 	if (mac->type >= e1000_i210) {
448 		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
449 		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
450 
451 	} else {
452 		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
453 		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
454 	}
455 
456 	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
457 		mac->ops.write_vfta = igb_write_vfta_i350;
458 	else
459 		mac->ops.write_vfta = igb_write_vfta;
460 
461 	/* Set if part includes ASF firmware */
462 	mac->asf_firmware_present = true;
463 	/* Set if manageability features are enabled. */
464 	mac->arc_subsystem_valid =
465 		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
466 			? true : false;
467 	/* enable EEE on i350 parts and later parts */
468 	if (mac->type >= e1000_i350)
469 		dev_spec->eee_disable = false;
470 	else
471 		dev_spec->eee_disable = true;
472 	/* Allow a single clear of the SW semaphore on I210 and newer */
473 	if (mac->type >= e1000_i210)
474 		dev_spec->clear_semaphore_once = true;
475 	/* physical interface link setup */
476 	mac->ops.setup_physical_interface =
477 		(hw->phy.media_type == e1000_media_type_copper)
478 			? igb_setup_copper_link_82575
479 			: igb_setup_serdes_link_82575;
480 
481 	if (mac->type == e1000_82580) {
482 		switch (hw->device_id) {
483 		/* feature not supported on these id's */
484 		case E1000_DEV_ID_DH89XXCC_SGMII:
485 		case E1000_DEV_ID_DH89XXCC_SERDES:
486 		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
487 		case E1000_DEV_ID_DH89XXCC_SFP:
488 			break;
489 		default:
490 			hw->dev_spec._82575.mas_capable = true;
491 			break;
492 		}
493 	}
494 	return 0;
495 }
496 
497 /**
498  *  igb_set_sfp_media_type_82575 - derives SFP module media type.
499  *  @hw: pointer to the HW structure
500  *
 *  The media type is chosen based on the SFP module compatibility flags
 *  retrieved from the SFP ID EEPROM.
503  **/
504 static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
505 {
506 	s32 ret_val = E1000_ERR_CONFIG;
507 	u32 ctrl_ext = 0;
508 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
509 	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
	u8 transceiver_type = 0;
511 	s32 timeout = 3;
512 
513 	/* Turn I2C interface ON and power on sfp cage */
514 	ctrl_ext = rd32(E1000_CTRL_EXT);
515 	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
516 	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
517 
518 	wrfl();
519 
520 	/* Read SFP module data */
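	/* Poll the identifier byte up to three times, 100 ms apart, to give a
	 * freshly powered module time to respond on the I2C bus.
	 */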
521 	while (timeout) {
522 		ret_val = igb_read_sfp_data_byte(hw,
523 			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
			&transceiver_type);
525 		if (ret_val == 0)
526 			break;
527 		msleep(100);
528 		timeout--;
529 	}
530 	if (ret_val != 0)
531 		goto out;
532 
533 	ret_val = igb_read_sfp_data_byte(hw,
534 			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
535 			(u8 *)eth_flags);
536 	if (ret_val != 0)
537 		goto out;
538 
539 	/* Check if there is some SFP module plugged and powered */
	if ((transceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
	    (transceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
542 		dev_spec->module_plugged = true;
543 		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
544 			hw->phy.media_type = e1000_media_type_internal_serdes;
545 		} else if (eth_flags->e100_base_fx) {
546 			dev_spec->sgmii_active = true;
547 			hw->phy.media_type = e1000_media_type_internal_serdes;
548 		} else if (eth_flags->e1000_base_t) {
549 			dev_spec->sgmii_active = true;
550 			hw->phy.media_type = e1000_media_type_copper;
551 		} else {
552 			hw->phy.media_type = e1000_media_type_unknown;
553 			hw_dbg("PHY module has not been recognized\n");
554 			goto out;
555 		}
556 	} else {
557 		hw->phy.media_type = e1000_media_type_unknown;
558 	}
559 	ret_val = 0;
560 out:
561 	/* Restore I2C interface setting */
562 	wr32(E1000_CTRL_EXT, ctrl_ext);
563 	return ret_val;
564 }
565 
566 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
567 {
568 	struct e1000_mac_info *mac = &hw->mac;
569 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
570 	s32 ret_val;
571 	u32 ctrl_ext = 0;
572 	u32 link_mode = 0;
573 
574 	switch (hw->device_id) {
575 	case E1000_DEV_ID_82575EB_COPPER:
576 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
577 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
578 		mac->type = e1000_82575;
579 		break;
580 	case E1000_DEV_ID_82576:
581 	case E1000_DEV_ID_82576_NS:
582 	case E1000_DEV_ID_82576_NS_SERDES:
583 	case E1000_DEV_ID_82576_FIBER:
584 	case E1000_DEV_ID_82576_SERDES:
585 	case E1000_DEV_ID_82576_QUAD_COPPER:
586 	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
587 	case E1000_DEV_ID_82576_SERDES_QUAD:
588 		mac->type = e1000_82576;
589 		break;
590 	case E1000_DEV_ID_82580_COPPER:
591 	case E1000_DEV_ID_82580_FIBER:
592 	case E1000_DEV_ID_82580_QUAD_FIBER:
593 	case E1000_DEV_ID_82580_SERDES:
594 	case E1000_DEV_ID_82580_SGMII:
595 	case E1000_DEV_ID_82580_COPPER_DUAL:
596 	case E1000_DEV_ID_DH89XXCC_SGMII:
597 	case E1000_DEV_ID_DH89XXCC_SERDES:
598 	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
599 	case E1000_DEV_ID_DH89XXCC_SFP:
600 		mac->type = e1000_82580;
601 		break;
602 	case E1000_DEV_ID_I350_COPPER:
603 	case E1000_DEV_ID_I350_FIBER:
604 	case E1000_DEV_ID_I350_SERDES:
605 	case E1000_DEV_ID_I350_SGMII:
606 		mac->type = e1000_i350;
607 		break;
608 	case E1000_DEV_ID_I210_COPPER:
609 	case E1000_DEV_ID_I210_FIBER:
610 	case E1000_DEV_ID_I210_SERDES:
611 	case E1000_DEV_ID_I210_SGMII:
612 	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
613 	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
614 		mac->type = e1000_i210;
615 		break;
616 	case E1000_DEV_ID_I211_COPPER:
617 		mac->type = e1000_i211;
618 		break;
619 	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
620 	case E1000_DEV_ID_I354_SGMII:
621 	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
622 		mac->type = e1000_i354;
623 		break;
624 	default:
625 		return -E1000_ERR_MAC_INIT;
626 	}
627 
628 	/* Set media type */
629 	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
630 	 * based on the EEPROM. We cannot rely upon device ID. There
631 	 * is no distinguishable difference between fiber and internal
632 	 * SerDes mode on the 82575. There can be an external PHY attached
633 	 * on the SGMII interface. For this, we'll set sgmii_active to true.
634 	 */
635 	hw->phy.media_type = e1000_media_type_copper;
636 	dev_spec->sgmii_active = false;
637 	dev_spec->module_plugged = false;
638 
639 	ctrl_ext = rd32(E1000_CTRL_EXT);
640 
641 	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
642 	switch (link_mode) {
643 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
644 		hw->phy.media_type = e1000_media_type_internal_serdes;
645 		break;
646 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
647 		/* Get phy control interface type set (MDIO vs. I2C)*/
648 		if (igb_sgmii_uses_mdio_82575(hw)) {
649 			hw->phy.media_type = e1000_media_type_copper;
650 			dev_spec->sgmii_active = true;
651 			break;
652 		}
653 		/* fall through for I2C based SGMII */
654 	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
655 		/* read media type from SFP EEPROM */
656 		ret_val = igb_set_sfp_media_type_82575(hw);
657 		if ((ret_val != 0) ||
658 		    (hw->phy.media_type == e1000_media_type_unknown)) {
659 			/* If media type was not identified then return media
660 			 * type defined by the CTRL_EXT settings.
661 			 */
662 			hw->phy.media_type = e1000_media_type_internal_serdes;
663 
664 			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
665 				hw->phy.media_type = e1000_media_type_copper;
666 				dev_spec->sgmii_active = true;
667 			}
668 
669 			break;
670 		}
671 
672 		/* do not change link mode for 100BaseFX */
673 		if (dev_spec->eth_flags.e100_base_fx)
674 			break;
675 
676 		/* change current link mode setting */
677 		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
678 
679 		if (hw->phy.media_type == e1000_media_type_copper)
680 			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
681 		else
682 			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
683 
684 		wr32(E1000_CTRL_EXT, ctrl_ext);
685 
686 		break;
687 	default:
688 		break;
689 	}
690 
691 	/* mac initialization and operations */
692 	ret_val = igb_init_mac_params_82575(hw);
693 	if (ret_val)
694 		goto out;
695 
696 	/* NVM initialization */
697 	ret_val = igb_init_nvm_params_82575(hw);
698 	switch (hw->mac.type) {
699 	case e1000_i210:
700 	case e1000_i211:
701 		ret_val = igb_init_nvm_params_i210(hw);
702 		break;
703 	default:
704 		break;
705 	}
706 
707 	if (ret_val)
708 		goto out;
709 
710 	/* if part supports SR-IOV then initialize mailbox parameters */
711 	switch (mac->type) {
712 	case e1000_82576:
713 	case e1000_i350:
714 		igb_init_mbx_params_pf(hw);
715 		break;
716 	default:
717 		break;
718 	}
719 
720 	/* setup PHY parameters */
721 	ret_val = igb_init_phy_params_82575(hw);
722 
723 out:
724 	return ret_val;
725 }
726 
727 /**
728  *  igb_acquire_phy_82575 - Acquire rights to access PHY
729  *  @hw: pointer to the HW structure
730  *
731  *  Acquire access rights to the correct PHY.  This is a
732  *  function pointer entry point called by the api module.
733  **/
734 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
735 {
736 	u16 mask = E1000_SWFW_PHY0_SM;
737 
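	/* Each PCI function has its own PHY semaphore bit in SW_FW_SYNC;
	 * pick the mask that matches this port.
	 */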
738 	if (hw->bus.func == E1000_FUNC_1)
739 		mask = E1000_SWFW_PHY1_SM;
740 	else if (hw->bus.func == E1000_FUNC_2)
741 		mask = E1000_SWFW_PHY2_SM;
742 	else if (hw->bus.func == E1000_FUNC_3)
743 		mask = E1000_SWFW_PHY3_SM;
744 
745 	return hw->mac.ops.acquire_swfw_sync(hw, mask);
746 }
747 
748 /**
749  *  igb_release_phy_82575 - Release rights to access PHY
750  *  @hw: pointer to the HW structure
751  *
752  *  A wrapper to release access rights to the correct PHY.  This is a
753  *  function pointer entry point called by the api module.
754  **/
755 static void igb_release_phy_82575(struct e1000_hw *hw)
756 {
757 	u16 mask = E1000_SWFW_PHY0_SM;
758 
759 	if (hw->bus.func == E1000_FUNC_1)
760 		mask = E1000_SWFW_PHY1_SM;
761 	else if (hw->bus.func == E1000_FUNC_2)
762 		mask = E1000_SWFW_PHY2_SM;
763 	else if (hw->bus.func == E1000_FUNC_3)
764 		mask = E1000_SWFW_PHY3_SM;
765 
766 	hw->mac.ops.release_swfw_sync(hw, mask);
767 }
768 
769 /**
770  *  igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
771  *  @hw: pointer to the HW structure
772  *  @offset: register offset to be read
773  *  @data: pointer to the read data
774  *
775  *  Reads the PHY register at offset using the serial gigabit media independent
776  *  interface and stores the retrieved information in data.
777  **/
778 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
779 					  u16 *data)
780 {
781 	s32 ret_val = -E1000_ERR_PARAM;
782 
783 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
784 		hw_dbg("PHY Address %u is out of range\n", offset);
785 		goto out;
786 	}
787 
788 	ret_val = hw->phy.ops.acquire(hw);
789 	if (ret_val)
790 		goto out;
791 
792 	ret_val = igb_read_phy_reg_i2c(hw, offset, data);
793 
794 	hw->phy.ops.release(hw);
795 
796 out:
797 	return ret_val;
798 }
799 
800 /**
801  *  igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
802  *  @hw: pointer to the HW structure
803  *  @offset: register offset to write to
804  *  @data: data to write at register offset
805  *
806  *  Writes the data to PHY register at the offset using the serial gigabit
807  *  media independent interface.
808  **/
809 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
810 					   u16 data)
811 {
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %u is out of range\n", offset);
817 		goto out;
818 	}
819 
820 	ret_val = hw->phy.ops.acquire(hw);
821 	if (ret_val)
822 		goto out;
823 
824 	ret_val = igb_write_phy_reg_i2c(hw, offset, data);
825 
826 	hw->phy.ops.release(hw);
827 
828 out:
829 	return ret_val;
830 }
831 
832 /**
833  *  igb_get_phy_id_82575 - Retrieve PHY addr and id
834  *  @hw: pointer to the HW structure
835  *
 *  Retrieves the PHY address and ID for PHYs that do and do not use the
 *  sgmii interface.
838  **/
839 static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
840 {
841 	struct e1000_phy_info *phy = &hw->phy;
842 	s32  ret_val = 0;
843 	u16 phy_id;
844 	u32 ctrl_ext;
845 	u32 mdic;
846 
	/* Extra read required for some PHYs on i354 */
848 	if (hw->mac.type == e1000_i354)
849 		igb_get_phy_id(hw);
850 
851 	/* For SGMII PHYs, we try the list of possible addresses until
852 	 * we find one that works.  For non-SGMII PHYs
853 	 * (e.g. integrated copper PHYs), an address of 1 should
854 	 * work.  The result of this function should mean phy->phy_addr
855 	 * and phy->id are set correctly.
856 	 */
857 	if (!(igb_sgmii_active_82575(hw))) {
858 		phy->addr = 1;
859 		ret_val = igb_get_phy_id(hw);
860 		goto out;
861 	}
862 
863 	if (igb_sgmii_uses_mdio_82575(hw)) {
864 		switch (hw->mac.type) {
865 		case e1000_82575:
866 		case e1000_82576:
867 			mdic = rd32(E1000_MDIC);
868 			mdic &= E1000_MDIC_PHY_MASK;
869 			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
870 			break;
871 		case e1000_82580:
872 		case e1000_i350:
873 		case e1000_i354:
874 		case e1000_i210:
875 		case e1000_i211:
876 			mdic = rd32(E1000_MDICNFG);
877 			mdic &= E1000_MDICNFG_PHY_MASK;
878 			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
879 			break;
880 		default:
881 			ret_val = -E1000_ERR_PHY;
882 			goto out;
883 		}
884 		ret_val = igb_get_phy_id(hw);
885 		goto out;
886 	}
887 
888 	/* Power on sgmii phy if it is disabled */
889 	ctrl_ext = rd32(E1000_CTRL_EXT);
890 	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
891 	wrfl();
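	/* Give the PHY behind the SFP cage time to power up before probing
	 * addresses.
	 */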
892 	msleep(300);
893 
894 	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
895 	 * Therefore, we need to test 1-7
896 	 */
897 	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
898 		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
899 		if (ret_val == 0) {
900 			hw_dbg("Vendor ID 0x%08X read at address %u\n",
901 			       phy_id, phy->addr);
			/* At the time of this writing, the M88 part is
903 			 * the only supported SGMII PHY product.
904 			 */
905 			if (phy_id == M88_VENDOR)
906 				break;
907 		} else {
908 			hw_dbg("PHY address %u was unreadable\n", phy->addr);
909 		}
910 	}
911 
912 	/* A valid PHY type couldn't be found. */
913 	if (phy->addr == 8) {
914 		phy->addr = 0;
915 		ret_val = -E1000_ERR_PHY;
916 		goto out;
917 	} else {
918 		ret_val = igb_get_phy_id(hw);
919 	}
920 
921 	/* restore previous sfp cage power state */
922 	wr32(E1000_CTRL_EXT, ctrl_ext);
923 
924 out:
925 	return ret_val;
926 }
927 
928 /**
929  *  igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
930  *  @hw: pointer to the HW structure
931  *
932  *  Resets the PHY using the serial gigabit media independent interface.
933  **/
934 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
935 {
936 	struct e1000_phy_info *phy = &hw->phy;
937 	s32 ret_val;
938 
939 	/* This isn't a true "hard" reset, but is the only reset
940 	 * available to us at this time.
941 	 */
942 
943 	hw_dbg("Soft resetting SGMII attached PHY...\n");
944 
	/* SFP documentation requires the following to configure the SFP module
946 	 * to work on SGMII.  No further documentation is given.
947 	 */
948 	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
949 	if (ret_val)
950 		goto out;
951 
952 	ret_val = igb_phy_sw_reset(hw);
953 	if (ret_val)
954 		goto out;
955 
956 	if (phy->id == M88E1512_E_PHY_ID)
957 		ret_val = igb_initialize_M88E1512_phy(hw);
958 	if (phy->id == M88E1543_E_PHY_ID)
959 		ret_val = igb_initialize_M88E1543_phy(hw);
960 out:
961 	return ret_val;
962 }
963 
964 /**
965  *  igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
966  *  @hw: pointer to the HW structure
967  *  @active: true to enable LPLU, false to disable
968  *
969  *  Sets the LPLU D0 state according to the active flag.  When
970  *  activating LPLU this function also disables smart speed
971  *  and vice versa.  LPLU will not be activated unless the
972  *  device autonegotiation advertisement meets standards of
973  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
974  *  This is a function pointer entry point only called by
975  *  PHY setup routines.
976  **/
977 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
978 {
979 	struct e1000_phy_info *phy = &hw->phy;
980 	s32 ret_val;
981 	u16 data;
982 
983 	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
984 	if (ret_val)
985 		goto out;
986 
987 	if (active) {
988 		data |= IGP02E1000_PM_D0_LPLU;
989 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
990 						 data);
991 		if (ret_val)
992 			goto out;
993 
994 		/* When LPLU is enabled, we should disable SmartSpeed */
995 		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
996 						&data);
997 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
998 		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
999 						 data);
1000 		if (ret_val)
1001 			goto out;
1002 	} else {
1003 		data &= ~IGP02E1000_PM_D0_LPLU;
1004 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1005 						 data);
1006 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1007 		 * during Dx states where the power conservation is most
1008 		 * important.  During driver activity we should enable
1009 		 * SmartSpeed, so performance is maintained.
1010 		 */
1011 		if (phy->smart_speed == e1000_smart_speed_on) {
1012 			ret_val = phy->ops.read_reg(hw,
1013 					IGP01E1000_PHY_PORT_CONFIG, &data);
1014 			if (ret_val)
1015 				goto out;
1016 
1017 			data |= IGP01E1000_PSCFR_SMART_SPEED;
1018 			ret_val = phy->ops.write_reg(hw,
1019 					IGP01E1000_PHY_PORT_CONFIG, data);
1020 			if (ret_val)
1021 				goto out;
1022 		} else if (phy->smart_speed == e1000_smart_speed_off) {
1023 			ret_val = phy->ops.read_reg(hw,
1024 					IGP01E1000_PHY_PORT_CONFIG, &data);
1025 			if (ret_val)
1026 				goto out;
1027 
1028 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1029 			ret_val = phy->ops.write_reg(hw,
1030 					IGP01E1000_PHY_PORT_CONFIG, data);
1031 			if (ret_val)
1032 				goto out;
1033 		}
1034 	}
1035 
1036 out:
1037 	return ret_val;
1038 }
1039 
1040 /**
1041  *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
1042  *  @hw: pointer to the HW structure
1043  *  @active: true to enable LPLU, false to disable
1044  *
1045  *  Sets the LPLU D0 state according to the active flag.  When
1046  *  activating LPLU this function also disables smart speed
1047  *  and vice versa.  LPLU will not be activated unless the
1048  *  device autonegotiation advertisement meets standards of
1049  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
1050  *  This is a function pointer entry point only called by
1051  *  PHY setup routines.
1052  **/
1053 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1054 {
1055 	struct e1000_phy_info *phy = &hw->phy;
1056 	u16 data;
1057 
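	/* On 82580 and later parts the LPLU and SmartSpeed controls live in a
	 * MAC register (PHY power management), so no PHY access is needed.
	 */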
1058 	data = rd32(E1000_82580_PHY_POWER_MGMT);
1059 
1060 	if (active) {
1061 		data |= E1000_82580_PM_D0_LPLU;
1062 
1063 		/* When LPLU is enabled, we should disable SmartSpeed */
1064 		data &= ~E1000_82580_PM_SPD;
1065 	} else {
1066 		data &= ~E1000_82580_PM_D0_LPLU;
1067 
1068 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1069 		 * during Dx states where the power conservation is most
1070 		 * important.  During driver activity we should enable
1071 		 * SmartSpeed, so performance is maintained.
1072 		 */
1073 		if (phy->smart_speed == e1000_smart_speed_on)
1074 			data |= E1000_82580_PM_SPD;
1075 		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	}
1077 
1078 	wr32(E1000_82580_PHY_POWER_MGMT, data);
1079 	return 0;
1080 }
1081 
1082 /**
1083  *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
1084  *  @hw: pointer to the HW structure
1085  *  @active: boolean used to enable/disable lplu
1086  *
 *  Always returns 0.
1088  *
1089  *  The low power link up (lplu) state is set to the power management level D3
1090  *  and SmartSpeed is disabled when active is true, else clear lplu for D3
1091  *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
1092  *  is used during Dx states where the power conservation is most important.
1093  *  During driver activity, SmartSpeed should be enabled so performance is
1094  *  maintained.
1095  **/
1096 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1097 {
1098 	struct e1000_phy_info *phy = &hw->phy;
1099 	u16 data;
1100 
1101 	data = rd32(E1000_82580_PHY_POWER_MGMT);
1102 
1103 	if (!active) {
1104 		data &= ~E1000_82580_PM_D3_LPLU;
1105 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1106 		 * during Dx states where the power conservation is most
1107 		 * important.  During driver activity we should enable
1108 		 * SmartSpeed, so performance is maintained.
1109 		 */
1110 		if (phy->smart_speed == e1000_smart_speed_on)
1111 			data |= E1000_82580_PM_SPD;
1112 		else if (phy->smart_speed == e1000_smart_speed_off)
1113 			data &= ~E1000_82580_PM_SPD;
1114 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1115 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1116 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1117 		data |= E1000_82580_PM_D3_LPLU;
1118 		/* When LPLU is enabled, we should disable SmartSpeed */
1119 		data &= ~E1000_82580_PM_SPD;
1120 	}
1121 
1122 	wr32(E1000_82580_PHY_POWER_MGMT, data);
1123 	return 0;
1124 }
1125 
1126 /**
1127  *  igb_acquire_nvm_82575 - Request for access to EEPROM
1128  *  @hw: pointer to the HW structure
1129  *
1130  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
1131  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
1132  *  Return successful if access grant bit set, else clear the request for
1133  *  EEPROM access and return -E1000_ERR_NVM (-1).
1134  **/
1135 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
1136 {
1137 	s32 ret_val;
1138 
1139 	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
1140 	if (ret_val)
1141 		goto out;
1142 
1143 	ret_val = igb_acquire_nvm(hw);
1144 
1145 	if (ret_val)
1146 		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1147 
1148 out:
1149 	return ret_val;
1150 }
1151 
1152 /**
1153  *  igb_release_nvm_82575 - Release exclusive access to EEPROM
1154  *  @hw: pointer to the HW structure
1155  *
1156  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
1157  *  then release the semaphores acquired.
1158  **/
1159 static void igb_release_nvm_82575(struct e1000_hw *hw)
1160 {
1161 	igb_release_nvm(hw);
1162 	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1163 }
1164 
1165 /**
1166  *  igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
1167  *  @hw: pointer to the HW structure
1168  *  @mask: specifies which semaphore to acquire
1169  *
1170  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
1171  *  will also specify which port we're acquiring the lock for.
1172  **/
1173 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1174 {
1175 	u32 swfw_sync;
1176 	u32 swmask = mask;
1177 	u32 fwmask = mask << 16;
1178 	s32 ret_val = 0;
1179 	s32 i = 0, timeout = 200;
1180 
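	/* The HW semaphore only protects the read-modify-write of SW_FW_SYNC;
	 * the SW_FW_SYNC bits themselves arbitrate the resource between
	 * software and firmware.
	 */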
1181 	while (i < timeout) {
1182 		if (igb_get_hw_semaphore(hw)) {
1183 			ret_val = -E1000_ERR_SWFW_SYNC;
1184 			goto out;
1185 		}
1186 
1187 		swfw_sync = rd32(E1000_SW_FW_SYNC);
1188 		if (!(swfw_sync & (fwmask | swmask)))
1189 			break;
1190 
1191 		/* Firmware currently using resource (fwmask)
1192 		 * or other software thread using resource (swmask)
1193 		 */
1194 		igb_put_hw_semaphore(hw);
1195 		mdelay(5);
1196 		i++;
1197 	}
1198 
1199 	if (i == timeout) {
1200 		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
1201 		ret_val = -E1000_ERR_SWFW_SYNC;
1202 		goto out;
1203 	}
1204 
1205 	swfw_sync |= swmask;
1206 	wr32(E1000_SW_FW_SYNC, swfw_sync);
1207 
1208 	igb_put_hw_semaphore(hw);
1209 
1210 out:
1211 	return ret_val;
1212 }
1213 
1214 /**
1215  *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
1216  *  @hw: pointer to the HW structure
1217  *  @mask: specifies which semaphore to acquire
1218  *
1219  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
1220  *  will also specify which port we're releasing the lock for.
1221  **/
1222 static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1223 {
1224 	u32 swfw_sync;
1225 
1226 	while (igb_get_hw_semaphore(hw) != 0)
1227 		; /* Empty */
1228 
1229 	swfw_sync = rd32(E1000_SW_FW_SYNC);
1230 	swfw_sync &= ~mask;
1231 	wr32(E1000_SW_FW_SYNC, swfw_sync);
1232 
1233 	igb_put_hw_semaphore(hw);
1234 }
1235 
1236 /**
1237  *  igb_get_cfg_done_82575 - Read config done bit
1238  *  @hw: pointer to the HW structure
1239  *
1240  *  Read the management control register for the config done bit for
1241  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so the failure is *ONLY* logged and 0 is
 *  returned.  If we were to return an error, EEPROM-less silicon would not
 *  be able to be reset or change link.
1245  **/
1246 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1247 {
1248 	s32 timeout = PHY_CFG_TIMEOUT;
1249 	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1250 
	if (hw->bus.func == E1000_FUNC_1)
1252 		mask = E1000_NVM_CFG_DONE_PORT_1;
1253 	else if (hw->bus.func == E1000_FUNC_2)
1254 		mask = E1000_NVM_CFG_DONE_PORT_2;
1255 	else if (hw->bus.func == E1000_FUNC_3)
1256 		mask = E1000_NVM_CFG_DONE_PORT_3;
1257 
1258 	while (timeout) {
1259 		if (rd32(E1000_EEMNGCTL) & mask)
1260 			break;
1261 		usleep_range(1000, 2000);
1262 		timeout--;
1263 	}
1264 	if (!timeout)
1265 		hw_dbg("MNG configuration cycle has not completed.\n");
1266 
1267 	/* If EEPROM is not marked present, init the PHY manually */
1268 	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1269 	    (hw->phy.type == e1000_phy_igp_3))
1270 		igb_phy_init_script_igp3(hw);
1271 
1272 	return 0;
1273 }
1274 
1275 /**
1276  *  igb_get_link_up_info_82575 - Get link speed/duplex info
1277  *  @hw: pointer to the HW structure
1278  *  @speed: stores the current speed
1279  *  @duplex: stores the current duplex
1280  *
 *  This is a wrapper function.  If using the serial gigabit media independent
1282  *  interface, use PCS to retrieve the link speed and duplex information.
1283  *  Otherwise, use the generic function to get the link speed and duplex info.
1284  **/
1285 static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1286 					u16 *duplex)
1287 {
1288 	s32 ret_val;
1289 
1290 	if (hw->phy.media_type != e1000_media_type_copper)
1291 		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
1292 							       duplex);
1293 	else
1294 		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
1295 								    duplex);
1296 
1297 	return ret_val;
1298 }
1299 
1300 /**
1301  *  igb_check_for_link_82575 - Check for link
1302  *  @hw: pointer to the HW structure
1303  *
1304  *  If sgmii is enabled, then use the pcs register to determine link, otherwise
1305  *  use the generic interface for determining link.
1306  **/
1307 static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1308 {
1309 	s32 ret_val;
1310 	u16 speed, duplex;
1311 
1312 	if (hw->phy.media_type != e1000_media_type_copper) {
1313 		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1314 							     &duplex);
1315 		/* Use this flag to determine if link needs to be checked or
		 * not.  If we have link, clear the flag so that we do not
1317 		 * continue to check for link.
1318 		 */
1319 		hw->mac.get_link_status = !hw->mac.serdes_has_link;
1320 
1321 		/* Configure Flow Control now that Auto-Neg has completed.
1322 		 * First, we need to restore the desired flow control
1323 		 * settings because we may have had to re-autoneg with a
1324 		 * different link partner.
1325 		 */
1326 		ret_val = igb_config_fc_after_link_up(hw);
1327 		if (ret_val)
1328 			hw_dbg("Error configuring flow control\n");
1329 	} else {
1330 		ret_val = igb_check_for_copper_link(hw);
1331 	}
1332 
1333 	return ret_val;
1334 }
1335 
1336 /**
1337  *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1338  *  @hw: pointer to the HW structure
1339  **/
1340 void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1341 {
	u32 reg;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1346 	    !igb_sgmii_active_82575(hw))
1347 		return;
1348 
1349 	/* Enable PCS to turn on link */
1350 	reg = rd32(E1000_PCS_CFG0);
1351 	reg |= E1000_PCS_CFG_PCS_EN;
1352 	wr32(E1000_PCS_CFG0, reg);
1353 
1354 	/* Power up the laser */
1355 	reg = rd32(E1000_CTRL_EXT);
1356 	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1357 	wr32(E1000_CTRL_EXT, reg);
1358 
1359 	/* flush the write to verify completion */
1360 	wrfl();
1361 	usleep_range(1000, 2000);
1362 }
1363 
1364 /**
1365  *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1366  *  @hw: pointer to the HW structure
1367  *  @speed: stores the current speed
1368  *  @duplex: stores the current duplex
1369  *
1370  *  Using the physical coding sub-layer (PCS), retrieve the current speed and
1371  *  duplex, then store the values in the pointers provided.
1372  **/
1373 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1374 						u16 *duplex)
1375 {
1376 	struct e1000_mac_info *mac = &hw->mac;
1377 	u32 pcs, status;
1378 
1379 	/* Set up defaults for the return values of this function */
1380 	mac->serdes_has_link = false;
1381 	*speed = 0;
1382 	*duplex = 0;
1383 
	/* For non-copper modes the STATUS register does not accurately
	 * reflect link state, so read the PCS status register instead.
	 */
1388 	pcs = rd32(E1000_PCS_LSTAT);
1389 
1390 	/* The link up bit determines when link is up on autoneg. The sync ok
1391 	 * gets set once both sides sync up and agree upon link. Stable link
1392 	 * can be determined by checking for both link up and link sync ok
1393 	 */
1394 	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1395 		mac->serdes_has_link = true;
1396 
1397 		/* Detect and store PCS speed */
1398 		if (pcs & E1000_PCS_LSTS_SPEED_1000)
1399 			*speed = SPEED_1000;
1400 		else if (pcs & E1000_PCS_LSTS_SPEED_100)
1401 			*speed = SPEED_100;
1402 		else
1403 			*speed = SPEED_10;
1404 
1405 		/* Detect and store PCS duplex */
1406 		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
1407 			*duplex = FULL_DUPLEX;
1408 		else
1409 			*duplex = HALF_DUPLEX;
1410 
		/* Check if it is an I354 2.5Gb backplane connection. */
1412 		if (mac->type == e1000_i354) {
1413 			status = rd32(E1000_STATUS);
1414 			if ((status & E1000_STATUS_2P5_SKU) &&
1415 			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
1416 				*speed = SPEED_2500;
1417 				*duplex = FULL_DUPLEX;
				hw_dbg("2500 Mbps, ");
1419 				hw_dbg("Full Duplex\n");
1420 			}
1421 		}
1422 
1423 	}
1424 
1425 	return 0;
1426 }
1427 
1428 /**
1429  *  igb_shutdown_serdes_link_82575 - Remove link during power down
1430  *  @hw: pointer to the HW structure
1431  *
1432  *  In the case of fiber serdes, shut down optics and PCS on driver unload
1433  *  when management pass thru is not enabled.
1434  **/
1435 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1436 {
1437 	u32 reg;
1438 
1439 	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
1440 	    igb_sgmii_active_82575(hw))
1441 		return;
1442 
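	/* Keep the link up if firmware manageability still needs the port
	 * (management pass through enabled).
	 */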
1443 	if (!igb_enable_mng_pass_thru(hw)) {
1444 		/* Disable PCS to turn off link */
1445 		reg = rd32(E1000_PCS_CFG0);
1446 		reg &= ~E1000_PCS_CFG_PCS_EN;
1447 		wr32(E1000_PCS_CFG0, reg);
1448 
1449 		/* shutdown the laser */
1450 		reg = rd32(E1000_CTRL_EXT);
1451 		reg |= E1000_CTRL_EXT_SDP3_DATA;
1452 		wr32(E1000_CTRL_EXT, reg);
1453 
1454 		/* flush the write to verify completion */
1455 		wrfl();
1456 		usleep_range(1000, 2000);
1457 	}
1458 }
1459 
1460 /**
1461  *  igb_reset_hw_82575 - Reset hardware
1462  *  @hw: pointer to the HW structure
1463  *
1464  *  This resets the hardware into a known state.  This is a
1465  *  function pointer entry point called by the api module.
1466  **/
1467 static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1468 {
1469 	u32 ctrl;
1470 	s32 ret_val;
1471 
1472 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
1473 	 * on the last TLP read/write transaction when MAC is reset.
1474 	 */
1475 	ret_val = igb_disable_pcie_master(hw);
1476 	if (ret_val)
1477 		hw_dbg("PCI-E Master disable polling has failed.\n");
1478 
1479 	/* set the completion timeout for interface */
1480 	ret_val = igb_set_pcie_completion_timeout(hw);
1481 	if (ret_val)
1482 		hw_dbg("PCI-E Set completion timeout has failed.\n");
1483 
1484 	hw_dbg("Masking off all interrupts\n");
1485 	wr32(E1000_IMC, 0xffffffff);
1486 
1487 	wr32(E1000_RCTL, 0);
1488 	wr32(E1000_TCTL, E1000_TCTL_PSP);
1489 	wrfl();
1490 
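	/* Allow pending transmit and receive activity to drain before issuing
	 * the global reset.
	 */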
1491 	usleep_range(10000, 20000);
1492 
1493 	ctrl = rd32(E1000_CTRL);
1494 
1495 	hw_dbg("Issuing a global reset to MAC\n");
1496 	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
1497 
1498 	ret_val = igb_get_auto_rd_done(hw);
1499 	if (ret_val) {
1500 		/* When auto config read does not complete, do not
1501 		 * return with an error. This can happen in situations
1502 		 * where there is no eeprom and prevents getting link.
1503 		 */
1504 		hw_dbg("Auto Read Done did not complete\n");
1505 	}
1506 
1507 	/* If EEPROM is not present, run manual init scripts */
1508 	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1509 		igb_reset_init_script_82575(hw);
1510 
1511 	/* Clear any pending interrupt events. */
1512 	wr32(E1000_IMC, 0xffffffff);
1513 	rd32(E1000_ICR);
1514 
1515 	/* Install any alternate MAC address into RAR0 */
1516 	ret_val = igb_check_alt_mac_addr(hw);
1517 
1518 	return ret_val;
1519 }
1520 
1521 /**
1522  *  igb_init_hw_82575 - Initialize hardware
1523  *  @hw: pointer to the HW structure
1524  *
1525  *  This inits the hardware readying it for operation.
1526  **/
1527 static s32 igb_init_hw_82575(struct e1000_hw *hw)
1528 {
1529 	struct e1000_mac_info *mac = &hw->mac;
1530 	s32 ret_val;
1531 	u16 i, rar_count = mac->rar_entry_count;
1532 
1533 	if ((hw->mac.type >= e1000_i210) &&
1534 	    !(igb_get_flash_presence_i210(hw))) {
1535 		ret_val = igb_pll_workaround_i210(hw);
1536 		if (ret_val)
1537 			return ret_val;
1538 	}
1539 
1540 	/* Initialize identification LED */
1541 	ret_val = igb_id_led_init(hw);
1542 	if (ret_val) {
1543 		hw_dbg("Error initializing identification LED\n");
1544 		/* This is not fatal and we should not stop init due to this */
1545 	}
1546 
1547 	/* Disabling VLAN filtering */
1548 	hw_dbg("Initializing the IEEE VLAN\n");
1549 	igb_clear_vfta(hw);
1550 
1551 	/* Setup the receive address */
1552 	igb_init_rx_addrs(hw, rar_count);
1553 
1554 	/* Zero out the Multicast HASH table */
1555 	hw_dbg("Zeroing the MTA\n");
1556 	for (i = 0; i < mac->mta_reg_count; i++)
1557 		array_wr32(E1000_MTA, i, 0);
1558 
1559 	/* Zero out the Unicast HASH table */
1560 	hw_dbg("Zeroing the UTA\n");
1561 	for (i = 0; i < mac->uta_reg_count; i++)
1562 		array_wr32(E1000_UTA, i, 0);
1563 
1564 	/* Setup link and flow control */
1565 	ret_val = igb_setup_link(hw);
1566 
1567 	/* Clear all of the statistics registers (clear on read).  It is
1568 	 * important that we do this after we have tried to establish link
1569 	 * because the symbol error count will increment wildly if there
1570 	 * is no link.
1571 	 */
1572 	igb_clear_hw_cntrs_82575(hw);
1573 	return ret_val;
1574 }
1575 
1576 /**
1577  *  igb_setup_copper_link_82575 - Configure copper link settings
1578  *  @hw: pointer to the HW structure
1579  *
1580  *  Configures the link for auto-neg or forced speed and duplex.  Then we check
1581  *  for link, once link is established calls to configure collision distance
1582  *  and flow control are called.
1583  **/
1584 static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1585 {
1586 	u32 ctrl;
1587 	s32  ret_val;
1588 	u32 phpm_reg;
1589 
1590 	ctrl = rd32(E1000_CTRL);
1591 	ctrl |= E1000_CTRL_SLU;
1592 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1593 	wr32(E1000_CTRL, ctrl);
1594 
1595 	/* Clear Go Link Disconnect bit on supported devices */
1596 	switch (hw->mac.type) {
1597 	case e1000_82580:
1598 	case e1000_i350:
1599 	case e1000_i210:
1600 	case e1000_i211:
1601 		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
1602 		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1603 		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
1604 		break;
1605 	default:
1606 		break;
1607 	}
1608 
1609 	ret_val = igb_setup_serdes_link_82575(hw);
1610 	if (ret_val)
1611 		goto out;
1612 
1613 	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for the SFP cage to power up the phy */
1615 		msleep(300);
1616 
1617 		ret_val = hw->phy.ops.reset(hw);
1618 		if (ret_val) {
1619 			hw_dbg("Error resetting the PHY.\n");
1620 			goto out;
1621 		}
1622 	}
1623 	switch (hw->phy.type) {
1624 	case e1000_phy_i210:
1625 	case e1000_phy_m88:
1626 		switch (hw->phy.id) {
1627 		case I347AT4_E_PHY_ID:
1628 		case M88E1112_E_PHY_ID:
1629 		case M88E1543_E_PHY_ID:
1630 		case M88E1512_E_PHY_ID:
1631 		case I210_I_PHY_ID:
1632 			ret_val = igb_copper_link_setup_m88_gen2(hw);
1633 			break;
1634 		default:
1635 			ret_val = igb_copper_link_setup_m88(hw);
1636 			break;
1637 		}
1638 		break;
1639 	case e1000_phy_igp_3:
1640 		ret_val = igb_copper_link_setup_igp(hw);
1641 		break;
1642 	case e1000_phy_82580:
1643 		ret_val = igb_copper_link_setup_82580(hw);
1644 		break;
1645 	case e1000_phy_bcm54616:
1646 		ret_val = 0;
1647 		break;
1648 	default:
1649 		ret_val = -E1000_ERR_PHY;
1650 		break;
1651 	}
1652 
1653 	if (ret_val)
1654 		goto out;
1655 
1656 	ret_val = igb_setup_copper_link(hw);
1657 out:
1658 	return ret_val;
1659 }
1660 
1661 /**
1662  *  igb_setup_serdes_link_82575 - Setup link for serdes
1663  *  @hw: pointer to the HW structure
1664  *
1665  *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
1666  *  used on copper connections where the serialized gigabit media independent
1667  *  interface (sgmii), or serdes fiber is being used.  Configures the link
1668  *  for auto-negotiation or forces speed/duplex.
1669  **/
1670 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1671 {
1672 	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1673 	bool pcs_autoneg;
1674 	s32 ret_val = 0;
1675 	u16 data;
1676 
1677 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1678 	    !igb_sgmii_active_82575(hw))
		return ret_val;

	/* On the 82575, SerDes loopback mode persists until it is
1683 	 * explicitly turned off or a power cycle is performed.  A read to
1684 	 * the register does not indicate its status.  Therefore, we ensure
1685 	 * loopback mode is disabled during initialization.
1686 	 */
1687 	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1688 
1689 	/* power on the sfp cage if present and turn on I2C */
1690 	ctrl_ext = rd32(E1000_CTRL_EXT);
1691 	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1692 	ctrl_ext |= E1000_CTRL_I2C_ENA;
1693 	wr32(E1000_CTRL_EXT, ctrl_ext);
1694 
1695 	ctrl_reg = rd32(E1000_CTRL);
1696 	ctrl_reg |= E1000_CTRL_SLU;
1697 
1698 	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1699 		/* set both sw defined pins */
1700 		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1701 
1702 		/* Set switch control to serdes energy detect */
1703 		reg = rd32(E1000_CONNSW);
1704 		reg |= E1000_CONNSW_ENRGSRC;
1705 		wr32(E1000_CONNSW, reg);
1706 	}
1707 
1708 	reg = rd32(E1000_PCS_LCTL);
1709 
1710 	/* default pcs_autoneg to the same setting as mac autoneg */
1711 	pcs_autoneg = hw->mac.autoneg;
1712 
1713 	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1714 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
1715 		/* sgmii mode lets the phy handle forcing speed/duplex */
1716 		pcs_autoneg = true;
1717 		/* autoneg time out should be disabled for SGMII mode */
1718 		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1719 		break;
1720 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1721 		/* disable PCS autoneg and support parallel detect only */
1722 		pcs_autoneg = false;
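		/* fall through */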
1723 	default:
1724 		if (hw->mac.type == e1000_82575 ||
1725 		    hw->mac.type == e1000_82576) {
1726 			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1727 			if (ret_val) {
				hw_dbg("NVM Read Error\n");
1729 				return ret_val;
1730 			}
1731 
1732 			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1733 				pcs_autoneg = false;
1734 		}
1735 
		/* non-SGMII modes only support a speed of 1000/Full for the
1737 		 * link so it is best to just force the MAC and let the pcs
1738 		 * link either autoneg or be forced to 1000/Full
1739 		 */
1740 		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1741 				E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1742 
1743 		/* set speed of 1000/Full if speed/duplex is forced */
1744 		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1745 		break;
1746 	}
1747 
1748 	wr32(E1000_CTRL, ctrl_reg);
1749 
1750 	/* New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg is the default set by most drivers. This is the
1752 	 * mode that will be compatible with older link partners and switches.
1753 	 * However, both are supported by the hardware and some drivers/tools.
1754 	 */
1755 	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1756 		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1757 
1758 	if (pcs_autoneg) {
1759 		/* Set PCS register for autoneg */
1760 		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1761 		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1762 
1763 		/* Disable force flow control for autoneg */
1764 		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1765 
1766 		/* Configure flow control advertisement for autoneg */
1767 		anadv_reg = rd32(E1000_PCS_ANADV);
1768 		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1769 		switch (hw->fc.requested_mode) {
1770 		case e1000_fc_full:
1771 		case e1000_fc_rx_pause:
1772 			anadv_reg |= E1000_TXCW_ASM_DIR;
1773 			anadv_reg |= E1000_TXCW_PAUSE;
1774 			break;
1775 		case e1000_fc_tx_pause:
1776 			anadv_reg |= E1000_TXCW_ASM_DIR;
1777 			break;
1778 		default:
1779 			break;
1780 		}
1781 		wr32(E1000_PCS_ANADV, anadv_reg);
1782 
1783 		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1784 	} else {
1785 		/* Set PCS register for forced link */
1786 		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
1787 
1788 		/* Force flow control for forced link */
1789 		reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1790 
1791 		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1792 	}
1793 
1794 	wr32(E1000_PCS_LCTL, reg);
1795 
1796 	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
1797 		igb_force_mac_fc(hw);
1798 
1799 	return ret_val;
1800 }
1801 
1802 /**
1803  *  igb_sgmii_active_82575 - Return sgmii state
1804  *  @hw: pointer to the HW structure
1805  *
 *  82575 silicon has a serialized gigabit media independent interface (sgmii)
 *  which can be enabled for use in embedded applications.  Simply return
 *  the current state of the sgmii interface.
1809  **/
1810 static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1811 {
1812 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1813 	return dev_spec->sgmii_active;
1814 }
1815 
1816 /**
1817  *  igb_reset_init_script_82575 - Inits HW defaults after reset
1818  *  @hw: pointer to the HW structure
1819  *
1820  *  Inits recommended HW defaults after a reset when there is no EEPROM
1821  *  detected. This is only for the 82575.
1822  **/
1823 static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1824 {
1825 	if (hw->mac.type == e1000_82575) {
1826 		hw_dbg("Running reset init script for 82575\n");
1827 		/* SerDes configuration via SERDESCTRL */
1828 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1829 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1830 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1831 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1832 
1833 		/* CCM configuration via CCMCTL register */
1834 		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1835 		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1836 
1837 		/* PCIe lanes configuration */
1838 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1839 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1840 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1841 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1842 
1843 		/* PCIe PLL Configuration */
1844 		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1845 		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1846 		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1847 	}
1848 
1849 	return 0;
1850 }
1851 
1852 /**
1853  *  igb_read_mac_addr_82575 - Read device MAC address
1854  *  @hw: pointer to the HW structure
1855  **/
1856 static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1857 {
1858 	s32 ret_val = 0;
1859 
1860 	/* If there's an alternate MAC address place it in RAR0
1861 	 * so that it will override the Si installed default perm
1862 	 * address.
1863 	 */
1864 	ret_val = igb_check_alt_mac_addr(hw);
1865 	if (ret_val)
1866 		goto out;
1867 
1868 	ret_val = igb_read_mac_addr(hw);
1869 
1870 out:
1871 	return ret_val;
1872 }
1873 
1874 /**
1875  * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1876  * @hw: pointer to the HW structure
1877  *
 * Remove the link when the PHY is powered down to save power, when link is
 * turned off during a driver unload, or when wake on LAN is not enabled.
1880  **/
1881 void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1882 {
1883 	/* If the management interface is not enabled, then power down */
1884 	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1885 		igb_power_down_phy_copper(hw);
1886 }
1887 
1888 /**
1889  *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1890  *  @hw: pointer to the HW structure
1891  *
1892  *  Clears the hardware counters by reading the counter registers.
1893  **/
1894 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1895 {
1896 	igb_clear_hw_cntrs_base(hw);
1897 
1898 	rd32(E1000_PRC64);
1899 	rd32(E1000_PRC127);
1900 	rd32(E1000_PRC255);
1901 	rd32(E1000_PRC511);
1902 	rd32(E1000_PRC1023);
1903 	rd32(E1000_PRC1522);
1904 	rd32(E1000_PTC64);
1905 	rd32(E1000_PTC127);
1906 	rd32(E1000_PTC255);
1907 	rd32(E1000_PTC511);
1908 	rd32(E1000_PTC1023);
1909 	rd32(E1000_PTC1522);
1910 
1911 	rd32(E1000_ALGNERRC);
1912 	rd32(E1000_RXERRC);
1913 	rd32(E1000_TNCRS);
1914 	rd32(E1000_CEXTERR);
1915 	rd32(E1000_TSCTC);
1916 	rd32(E1000_TSCTFC);
1917 
1918 	rd32(E1000_MGTPRC);
1919 	rd32(E1000_MGTPDC);
1920 	rd32(E1000_MGTPTC);
1921 
1922 	rd32(E1000_IAC);
1923 	rd32(E1000_ICRXOC);
1924 
1925 	rd32(E1000_ICRXPTC);
1926 	rd32(E1000_ICRXATC);
1927 	rd32(E1000_ICTXPTC);
1928 	rd32(E1000_ICTXATC);
1929 	rd32(E1000_ICTXQEC);
1930 	rd32(E1000_ICTXQMTC);
1931 	rd32(E1000_ICRXDMTC);
1932 
1933 	rd32(E1000_CBTMPC);
1934 	rd32(E1000_HTDPMC);
1935 	rd32(E1000_CBRMPC);
1936 	rd32(E1000_RPTHC);
1937 	rd32(E1000_HGPTC);
1938 	rd32(E1000_HTCBDPC);
1939 	rd32(E1000_HGORCL);
1940 	rd32(E1000_HGORCH);
1941 	rd32(E1000_HGOTCL);
1942 	rd32(E1000_HGOTCH);
1943 	rd32(E1000_LENERRS);
1944 
1945 	/* This register should not be read in copper configurations */
1946 	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1947 	    igb_sgmii_active_82575(hw))
1948 		rd32(E1000_SCVPC);
1949 }
1950 
1951 /**
1952  *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1953  *  @hw: pointer to the HW structure
1954  *
 *  After rx is enabled, if manageability is enabled then there is likely
 *  some bad data at the start of the fifo and possibly in the DMA fifo.
 *  This function clears the fifos and flushes any packets that arrived
 *  while rx was being enabled.
1959  **/
1960 void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1961 {
1962 	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1963 	int i, ms_wait;
1964 
1965 	/* disable IPv6 options as per hardware errata */
1966 	rfctl = rd32(E1000_RFCTL);
1967 	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
1968 	wr32(E1000_RFCTL, rfctl);
1969 
1970 	if (hw->mac.type != e1000_82575 ||
1971 	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1972 		return;
1973 
1974 	/* Disable all RX queues */
1975 	for (i = 0; i < 4; i++) {
1976 		rxdctl[i] = rd32(E1000_RXDCTL(i));
1977 		wr32(E1000_RXDCTL(i),
1978 		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1979 	}
1980 	/* Poll all queues to verify they have shut down */
1981 	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1982 		usleep_range(1000, 2000);
1983 		rx_enabled = 0;
1984 		for (i = 0; i < 4; i++)
1985 			rx_enabled |= rd32(E1000_RXDCTL(i));
1986 		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1987 			break;
1988 	}
1989 
1990 	if (ms_wait == 10)
1991 		hw_dbg("Queue disable timed out after 10ms\n");
1992 
	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Then set RCTL.EN and wait 2ms so
	 * that any packet that was arriving while RCTL.EN was being set is
	 * flushed.
	 */
1997 	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1998 
1999 	rlpml = rd32(E1000_RLPML);
2000 	wr32(E1000_RLPML, 0);
2001 
2002 	rctl = rd32(E1000_RCTL);
2003 	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
2004 	temp_rctl |= E1000_RCTL_LPE;
2005 
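	/* With RLPML cleared to zero and LPE set, every received frame
	 * exceeds the programmed long-packet size limit and is discarded,
	 * so the FIFO drains without delivering packets to the host.
	 */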
2006 	wr32(E1000_RCTL, temp_rctl);
2007 	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
2008 	wrfl();
2009 	usleep_range(2000, 3000);
2010 
2011 	/* Enable RX queues that were previously enabled and restore our
2012 	 * previous state
2013 	 */
2014 	for (i = 0; i < 4; i++)
2015 		wr32(E1000_RXDCTL(i), rxdctl[i]);
2016 	wr32(E1000_RCTL, rctl);
2017 	wrfl();
2018 
2019 	wr32(E1000_RLPML, rlpml);
2020 	wr32(E1000_RFCTL, rfctl);
2021 
2022 	/* Flush receive errors generated by workaround */
2023 	rd32(E1000_ROC);
2024 	rd32(E1000_RNBC);
2025 	rd32(E1000_MPC);
2026 }
2027 
2028 /**
2029  *  igb_set_pcie_completion_timeout - set pci-e completion timeout
2030  *  @hw: pointer to the HW structure
2031  *
2032  *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
2033  *  however the hardware default for these parts is 500us to 1ms which is less
2034  *  than the 10ms recommended by the pci-e spec.  To address this we need to
2035  *  increase the value to either 10ms to 200ms for capability version 1 config,
2036  *  or 16ms to 55ms for version 2.
2037  **/
2038 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
2039 {
2040 	u32 gcr = rd32(E1000_GCR);
2041 	s32 ret_val = 0;
2042 	u16 pcie_devctl2;
2043 
2044 	/* only take action if timeout value is defaulted to 0 */
2045 	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
2046 		goto out;
2047 
2048 	/* if capabilities version is type 1 we can write the
2049 	 * timeout of 10ms to 200ms through the GCR register
2050 	 */
2051 	if (!(gcr & E1000_GCR_CAP_VER2)) {
2052 		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
2053 		goto out;
2054 	}
2055 
2056 	/* for version 2 capabilities we need to write the config space
2057 	 * directly in order to set the completion timeout value for
2058 	 * 16ms to 55ms
2059 	 */
2060 	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2061 					&pcie_devctl2);
2062 	if (ret_val)
2063 		goto out;
2064 
2065 	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
2066 
2067 	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2068 					 &pcie_devctl2);
2069 out:
2070 	/* disable completion timeout resend */
2071 	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
2072 
2073 	wr32(E1000_GCR, gcr);
2074 	return ret_val;
2075 }
2076 
2077 /**
2078  *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
2079  *  @hw: pointer to the hardware struct
2080  *  @enable: state to enter, either enabled or disabled
2081  *  @pf: Physical Function pool - do not set anti-spoofing for the PF
2082  *
2083  *  enables/disables L2 switch anti-spoofing functionality.
2084  **/
2085 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
2086 {
2087 	u32 reg_val, reg_offset;
2088 
2089 	switch (hw->mac.type) {
2090 	case e1000_82576:
2091 		reg_offset = E1000_DTXSWC;
2092 		break;
2093 	case e1000_i350:
2094 	case e1000_i354:
2095 		reg_offset = E1000_TXSWC;
2096 		break;
2097 	default:
2098 		return;
2099 	}
2100 
2101 	reg_val = rd32(reg_offset);
2102 	if (enable) {
2103 		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
2104 			     E1000_DTXSWC_VLAN_SPOOF_MASK);
2105 		/* The PF can spoof - it has to in order to
2106 		 * support emulation mode NICs
2107 		 */
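		/* The XOR clears the PF's own MAC and VLAN anti-spoofing bits
		 * (bit pf and bit pf + MAX_NUM_VFS) that were set just above,
		 * leaving spoof checking enabled only for the VF pools.
		 */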
2108 		reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
2109 	} else {
2110 		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
2111 			     E1000_DTXSWC_VLAN_SPOOF_MASK);
2112 	}
2113 	wr32(reg_offset, reg_val);
2114 }
2115 
2116 /**
2117  *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
2118  *  @hw: pointer to the hardware struct
2119  *  @enable: state to enter, either enabled or disabled
2120  *
2121  *  enables/disables L2 switch loopback functionality.
2122  **/
2123 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
2124 {
2125 	u32 dtxswc;
2126 
2127 	switch (hw->mac.type) {
2128 	case e1000_82576:
2129 		dtxswc = rd32(E1000_DTXSWC);
2130 		if (enable)
2131 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2132 		else
2133 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2134 		wr32(E1000_DTXSWC, dtxswc);
2135 		break;
2136 	case e1000_i354:
2137 	case e1000_i350:
2138 		dtxswc = rd32(E1000_TXSWC);
2139 		if (enable)
2140 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2141 		else
2142 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2143 		wr32(E1000_TXSWC, dtxswc);
2144 		break;
2145 	default:
2146 		/* Currently no other hardware supports loopback */
2147 		break;
2148 	}
}
2151 
2152 /**
2153  *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
2154  *  @hw: pointer to the hardware struct
2155  *  @enable: state to enter, either enabled or disabled
2156  *
2157  *  enables/disables replication of packets across multiple pools.
2158  **/
2159 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
2160 {
2161 	u32 vt_ctl = rd32(E1000_VT_CTL);
2162 
2163 	if (enable)
2164 		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
2165 	else
2166 		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
2167 
2168 	wr32(E1000_VT_CTL, vt_ctl);
2169 }
2170 
2171 /**
2172  *  igb_read_phy_reg_82580 - Read 82580 MDI control register
2173  *  @hw: pointer to the HW structure
2174  *  @offset: register offset to be read
2175  *  @data: pointer to the read data
2176  *
2177  *  Reads the MDI control register in the PHY at offset and stores the
2178  *  information read to data.
2179  **/
2180 s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
2181 {
2182 	s32 ret_val;
2183 
2184 	ret_val = hw->phy.ops.acquire(hw);
2185 	if (ret_val)
2186 		goto out;
2187 
2188 	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
2189 
2190 	hw->phy.ops.release(hw);
2191 
2192 out:
2193 	return ret_val;
2194 }
2195 
2196 /**
2197  *  igb_write_phy_reg_82580 - Write 82580 MDI control register
2198  *  @hw: pointer to the HW structure
2199  *  @offset: register offset to write to
2200  *  @data: data to write to register at offset
2201  *
2202  *  Writes data to MDI control register in the PHY at offset.
2203  **/
2204 s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
2205 {
	s32 ret_val;
2208 
2209 	ret_val = hw->phy.ops.acquire(hw);
2210 	if (ret_val)
2211 		goto out;
2212 
2213 	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
2214 
2215 	hw->phy.ops.release(hw);
2216 
2217 out:
2218 	return ret_val;
2219 }
2220 
2221 /**
2222  *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
2223  *  @hw: pointer to the HW structure
2224  *
 *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
2226  *  the values found in the EEPROM.  This addresses an issue in which these
2227  *  bits are not restored from EEPROM after reset.
2228  **/
2229 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
2230 {
2231 	s32 ret_val = 0;
2232 	u32 mdicnfg;
2233 	u16 nvm_data = 0;
2234 
2235 	if (hw->mac.type != e1000_82580)
2236 		goto out;
2237 	if (!igb_sgmii_active_82575(hw))
2238 		goto out;
2239 
2240 	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2241 				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2242 				   &nvm_data);
2243 	if (ret_val) {
2244 		hw_dbg("NVM Read Error\n");
2245 		goto out;
2246 	}
2247 
2248 	mdicnfg = rd32(E1000_MDICNFG);
2249 	if (nvm_data & NVM_WORD24_EXT_MDIO)
2250 		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
2251 	if (nvm_data & NVM_WORD24_COM_MDIO)
2252 		mdicnfg |= E1000_MDICNFG_COM_MDIO;
2253 	wr32(E1000_MDICNFG, mdicnfg);
2254 out:
2255 	return ret_val;
2256 }
2257 
2258 /**
2259  *  igb_reset_hw_82580 - Reset hardware
2260  *  @hw: pointer to the HW structure
2261  *
 *  This resets the function or the entire device (all ports, etc.)
2263  *  to a known state.
2264  **/
2265 static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2266 {
2267 	s32 ret_val = 0;
2268 	/* BH SW mailbox bit in SW_FW_SYNC */
2269 	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
2270 	u32 ctrl;
2271 	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
2272 
2273 	hw->dev_spec._82575.global_device_reset = false;
2274 
2275 	/* due to hw errata, global device reset doesn't always
2276 	 * work on 82580
2277 	 */
2278 	if (hw->mac.type == e1000_82580)
2279 		global_device_reset = false;
2280 
2281 	/* Get current control state. */
2282 	ctrl = rd32(E1000_CTRL);
2283 
2284 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
2285 	 * on the last TLP read/write transaction when MAC is reset.
2286 	 */
2287 	ret_val = igb_disable_pcie_master(hw);
2288 	if (ret_val)
2289 		hw_dbg("PCI-E Master disable polling has failed.\n");
2290 
2291 	hw_dbg("Masking off all interrupts\n");
2292 	wr32(E1000_IMC, 0xffffffff);
2293 	wr32(E1000_RCTL, 0);
2294 	wr32(E1000_TCTL, E1000_TCTL_PSP);
2295 	wrfl();
2296 
2297 	usleep_range(10000, 11000);
2298 
2299 	/* Determine whether or not a global dev reset is requested */
2300 	if (global_device_reset &&
2301 		hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
2302 			global_device_reset = false;
2303 
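	/* Issue the full device reset only when it was requested and a
	 * device reset is not already flagged in STATUS; otherwise fall
	 * back to a port-level reset (CTRL.RST).
	 */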
2304 	if (global_device_reset &&
2305 		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
2306 		ctrl |= E1000_CTRL_DEV_RST;
2307 	else
2308 		ctrl |= E1000_CTRL_RST;
2309 
2310 	wr32(E1000_CTRL, ctrl);
2311 	wrfl();
2312 
	/* Add delay to ensure DEV_RST has time to complete */
2314 	if (global_device_reset)
2315 		usleep_range(5000, 6000);
2316 
2317 	ret_val = igb_get_auto_rd_done(hw);
2318 	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen when there is
		 * no eeprom, and returning an error here would prevent
		 * link from being established.
		 */
2323 		hw_dbg("Auto Read Done did not complete\n");
2324 	}
2325 
2326 	/* clear global device reset status bit */
2327 	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
2328 
2329 	/* Clear any pending interrupt events. */
2330 	wr32(E1000_IMC, 0xffffffff);
2331 	rd32(E1000_ICR);
2332 
2333 	ret_val = igb_reset_mdicnfg_82580(hw);
2334 	if (ret_val)
2335 		hw_dbg("Could not reset MDICNFG based on EEPROM\n");
2336 
2337 	/* Install any alternate MAC address into RAR0 */
2338 	ret_val = igb_check_alt_mac_addr(hw);
2339 
2340 	/* Release semaphore */
2341 	if (global_device_reset)
2342 		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
2343 
2344 	return ret_val;
2345 }
2346 
2347 /**
2348  *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2349  *  @data: data received by reading RXPBS register
2350  *
2351  *  The 82580 uses a table based approach for packet buffer allocation sizes.
 *  This function converts the retrieved value into the correct table value:
 *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
 *  0x0 36  72 144   1   2   4   8  16
 *  0x8 35  70 140 rsv rsv rsv rsv rsv
 **/
2357 u16 igb_rxpbs_adjust_82580(u32 data)
2358 {
2359 	u16 ret_val = 0;
2360 
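	/* e.g. data == 0x2 returns table entry 144; reserved encodings
	 * beyond the end of the table return 0.
	 */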
2361 	if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
2362 		ret_val = e1000_82580_rxpbs_table[data];
2363 
2364 	return ret_val;
2365 }
2366 
2367 /**
2368  *  igb_validate_nvm_checksum_with_offset - Validate EEPROM
2369  *  checksum
2370  *  @hw: pointer to the HW structure
2371  *  @offset: offset in words of the checksum protected region
2372  *
2373  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2374  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
2375  **/
2376 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2377 						 u16 offset)
2378 {
2379 	s32 ret_val = 0;
2380 	u16 checksum = 0;
2381 	u16 i, nvm_data;
2382 
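	/* Sum every word from offset through offset + NVM_CHECKSUM_REG,
	 * including the stored checksum word itself; a valid image sums
	 * to NVM_SUM (0xBABA).
	 */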
2383 	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2384 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2385 		if (ret_val) {
2386 			hw_dbg("NVM Read Error\n");
2387 			goto out;
2388 		}
2389 		checksum += nvm_data;
2390 	}
2391 
2392 	if (checksum != (u16) NVM_SUM) {
2393 		hw_dbg("NVM Checksum Invalid\n");
2394 		ret_val = -E1000_ERR_NVM;
2395 		goto out;
2396 	}
2397 
2398 out:
2399 	return ret_val;
2400 }
2401 
2402 /**
2403  *  igb_update_nvm_checksum_with_offset - Update EEPROM
2404  *  checksum
2405  *  @hw: pointer to the HW structure
2406  *  @offset: offset in words of the checksum protected region
2407  *
2408  *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
2409  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
2410  *  value to the EEPROM.
2411  **/
2412 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2413 {
2414 	s32 ret_val;
2415 	u16 checksum = 0;
2416 	u16 i, nvm_data;
2417 
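	/* Sum the words preceding the checksum word, then store
	 * NVM_SUM - sum so the protected region again sums to NVM_SUM.
	 */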
2418 	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2419 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2420 		if (ret_val) {
2421 			hw_dbg("NVM Read Error while updating checksum.\n");
2422 			goto out;
2423 		}
2424 		checksum += nvm_data;
2425 	}
2426 	checksum = (u16) NVM_SUM - checksum;
2427 	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2428 				&checksum);
2429 	if (ret_val)
2430 		hw_dbg("NVM Write Error while updating checksum.\n");
2431 
2432 out:
2433 	return ret_val;
2434 }
2435 
2436 /**
2437  *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2438  *  @hw: pointer to the HW structure
2439  *
2440  *  Calculates the EEPROM section checksum by reading/adding each word of
2441  *  the EEPROM and then verifies that the sum of the EEPROM is
2442  *  equal to 0xBABA.
2443  **/
2444 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2445 {
2446 	s32 ret_val = 0;
2447 	u16 eeprom_regions_count = 1;
2448 	u16 j, nvm_data;
2449 	u16 nvm_offset;
2450 
2451 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2452 	if (ret_val) {
2453 		hw_dbg("NVM Read Error\n");
2454 		goto out;
2455 	}
2456 
2457 	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2458 		/* if checksums compatibility bit is set validate checksums
2459 		 * for all 4 ports.
2460 		 */
2461 		eeprom_regions_count = 4;
2462 	}
2463 
2464 	for (j = 0; j < eeprom_regions_count; j++) {
2465 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2466 		ret_val = igb_validate_nvm_checksum_with_offset(hw,
2467 								nvm_offset);
2468 		if (ret_val != 0)
2469 			goto out;
2470 	}
2471 
2472 out:
2473 	return ret_val;
2474 }
2475 
2476 /**
2477  *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
2478  *  @hw: pointer to the HW structure
2479  *
2480  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2481  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2482  *  checksum and writes the value to the EEPROM.
2483  **/
2484 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2485 {
2486 	s32 ret_val;
2487 	u16 j, nvm_data;
2488 	u16 nvm_offset;
2489 
2490 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2491 	if (ret_val) {
2492 		hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
2493 		goto out;
2494 	}
2495 
2496 	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2497 		/* set compatibility bit to validate checksums appropriately */
2498 		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2499 		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2500 					&nvm_data);
2501 		if (ret_val) {
2502 			hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
2503 			goto out;
2504 		}
2505 	}
2506 
2507 	for (j = 0; j < 4; j++) {
2508 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2509 		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2510 		if (ret_val)
2511 			goto out;
2512 	}
2513 
2514 out:
2515 	return ret_val;
2516 }
2517 
2518 /**
2519  *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2520  *  @hw: pointer to the HW structure
2521  *
2522  *  Calculates the EEPROM section checksum by reading/adding each word of
2523  *  the EEPROM and then verifies that the sum of the EEPROM is
2524  *  equal to 0xBABA.
2525  **/
2526 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2527 {
2528 	s32 ret_val = 0;
2529 	u16 j;
2530 	u16 nvm_offset;
2531 
2532 	for (j = 0; j < 4; j++) {
2533 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2534 		ret_val = igb_validate_nvm_checksum_with_offset(hw,
2535 								nvm_offset);
2536 		if (ret_val != 0)
2537 			goto out;
2538 	}
2539 
2540 out:
2541 	return ret_val;
2542 }
2543 
2544 /**
2545  *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
2546  *  @hw: pointer to the HW structure
2547  *
2548  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2549  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2550  *  checksum and writes the value to the EEPROM.
2551  **/
2552 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2553 {
2554 	s32 ret_val = 0;
2555 	u16 j;
2556 	u16 nvm_offset;
2557 
2558 	for (j = 0; j < 4; j++) {
2559 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2560 		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2561 		if (ret_val != 0)
2562 			goto out;
2563 	}
2564 
2565 out:
2566 	return ret_val;
2567 }
2568 
2569 /**
2570  *  __igb_access_emi_reg - Read/write EMI register
2571  *  @hw: pointer to the HW structure
 *  @address: EMI address to program
2573  *  @data: pointer to value to read/write from/to the EMI address
2574  *  @read: boolean flag to indicate read or write
2575  **/
2576 static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2577 				  u16 *data, bool read)
2578 {
2579 	s32 ret_val = 0;
2580 
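	/* EMI access is an indirect two-step sequence: latch the target
	 * address in EMIADD, then transfer the data through EMIDATA.
	 */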
2581 	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2582 	if (ret_val)
2583 		return ret_val;
2584 
2585 	if (read)
2586 		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2587 	else
2588 		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2589 
2590 	return ret_val;
2591 }
2592 
2593 /**
2594  *  igb_read_emi_reg - Read Extended Management Interface register
2595  *  @hw: pointer to the HW structure
2596  *  @addr: EMI address to program
2597  *  @data: value to be read from the EMI address
2598  **/
2599 s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2600 {
2601 	return __igb_access_emi_reg(hw, addr, data, true);
2602 }
2603 
2604 /**
2605  *  igb_set_eee_i350 - Enable/disable EEE support
2606  *  @hw: pointer to the HW structure
2607  *  @adv1G: boolean flag enabling 1G EEE advertisement
 *  @adv100M: boolean flag enabling 100M EEE advertisement
2609  *
2610  *  Enable/disable EEE based on setting in dev_spec structure.
2611  *
2612  **/
2613 s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
2614 {
2615 	u32 ipcnfg, eeer;
2616 
2617 	if ((hw->mac.type < e1000_i350) ||
2618 	    (hw->phy.media_type != e1000_media_type_copper))
2619 		goto out;
2620 	ipcnfg = rd32(E1000_IPCNFG);
2621 	eeer = rd32(E1000_EEER);
2622 
2623 	/* enable or disable per user setting */
2624 	if (!(hw->dev_spec._82575.eee_disable)) {
2625 		u32 eee_su = rd32(E1000_EEE_SU);
2626 
2627 		if (adv100M)
2628 			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
2629 		else
2630 			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
2631 
2632 		if (adv1G)
2633 			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
2634 		else
2635 			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
2636 
2637 		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2638 			E1000_EEER_LPI_FC);
2639 
2640 		/* This bit should not be set in normal operation. */
2641 		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2642 			hw_dbg("LPI Clock Stop Bit should not be set!\n");
2643 
2644 	} else {
2645 		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2646 			E1000_IPCNFG_EEE_100M_AN);
2647 		eeer &= ~(E1000_EEER_TX_LPI_EN |
2648 			E1000_EEER_RX_LPI_EN |
2649 			E1000_EEER_LPI_FC);
2650 	}
2651 	wr32(E1000_IPCNFG, ipcnfg);
2652 	wr32(E1000_EEER, eeer);
2653 	rd32(E1000_IPCNFG);
2654 	rd32(E1000_EEER);
out:
2657 	return 0;
2658 }
2659 
2660 /**
2661  *  igb_set_eee_i354 - Enable/disable EEE support
2662  *  @hw: pointer to the HW structure
2663  *  @adv1G: boolean flag enabling 1G EEE advertisement
 *  @adv100M: boolean flag enabling 100M EEE advertisement
2665  *
2666  *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
2667  *
2668  **/
2669 s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
2670 {
2671 	struct e1000_phy_info *phy = &hw->phy;
2672 	s32 ret_val = 0;
2673 	u16 phy_data;
2674 
2675 	if ((hw->phy.media_type != e1000_media_type_copper) ||
2676 	    ((phy->id != M88E1543_E_PHY_ID) &&
2677 	     (phy->id != M88E1512_E_PHY_ID)))
2678 		goto out;
2679 
2680 	if (!hw->dev_spec._82575.eee_disable) {
2681 		/* Switch to PHY page 18. */
2682 		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
2683 		if (ret_val)
2684 			goto out;
2685 
2686 		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2687 					    &phy_data);
2688 		if (ret_val)
2689 			goto out;
2690 
2691 		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
2692 		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2693 					     phy_data);
2694 		if (ret_val)
2695 			goto out;
2696 
2697 		/* Return the PHY to page 0. */
2698 		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
2699 		if (ret_val)
2700 			goto out;
2701 
2702 		/* Turn on EEE advertisement. */
2703 		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2704 					     E1000_EEE_ADV_DEV_I354,
2705 					     &phy_data);
2706 		if (ret_val)
2707 			goto out;
2708 
2709 		if (adv100M)
2710 			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
2711 		else
2712 			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
2713 
2714 		if (adv1G)
2715 			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
2716 		else
2717 			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
2718 
2719 		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2720 						E1000_EEE_ADV_DEV_I354,
2721 						phy_data);
2722 	} else {
2723 		/* Turn off EEE advertisement. */
2724 		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2725 					     E1000_EEE_ADV_DEV_I354,
2726 					     &phy_data);
2727 		if (ret_val)
2728 			goto out;
2729 
2730 		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
2731 			      E1000_EEE_ADV_1000_SUPPORTED);
2732 		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2733 					      E1000_EEE_ADV_DEV_I354,
2734 					      phy_data);
2735 	}
2736 
2737 out:
2738 	return ret_val;
2739 }
2740 
2741 /**
2742  *  igb_get_eee_status_i354 - Get EEE status
2743  *  @hw: pointer to the HW structure
2744  *  @status: EEE status
2745  *
 *  Infer the EEE status from whether Tx or Rx LPI indications have
 *  been received.
2748  **/
2749 s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2750 {
2751 	struct e1000_phy_info *phy = &hw->phy;
2752 	s32 ret_val = 0;
2753 	u16 phy_data;
2754 
2755 	/* Check if EEE is supported on this device. */
2756 	if ((hw->phy.media_type != e1000_media_type_copper) ||
2757 	    ((phy->id != M88E1543_E_PHY_ID) &&
2758 	     (phy->id != M88E1512_E_PHY_ID)))
2759 		goto out;
2760 
2761 	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2762 				     E1000_PCS_STATUS_DEV_I354,
2763 				     &phy_data);
2764 	if (ret_val)
2765 		goto out;
2766 
2767 	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2768 			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
2769 
2770 out:
2771 	return ret_val;
2772 }
2773 
2774 static const u8 e1000_emc_temp_data[4] = {
2775 	E1000_EMC_INTERNAL_DATA,
2776 	E1000_EMC_DIODE1_DATA,
2777 	E1000_EMC_DIODE2_DATA,
2778 	E1000_EMC_DIODE3_DATA
2779 };
2780 static const u8 e1000_emc_therm_limit[4] = {
2781 	E1000_EMC_INTERNAL_THERM_LIMIT,
2782 	E1000_EMC_DIODE1_THERM_LIMIT,
2783 	E1000_EMC_DIODE2_THERM_LIMIT,
2784 	E1000_EMC_DIODE3_THERM_LIMIT
2785 };
2786 
2787 #ifdef CONFIG_IGB_HWMON
2788 /**
2789  *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2790  *  @hw: pointer to hardware structure
2791  *
2792  *  Updates the temperatures in mac.thermal_sensor_data
2793  **/
2794 static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2795 {
2796 	u16 ets_offset;
2797 	u16 ets_cfg;
2798 	u16 ets_sensor;
2799 	u8  num_sensors;
2800 	u8  sensor_index;
2801 	u8  sensor_location;
2802 	u8  i;
2803 	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2804 
2805 	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2806 		return E1000_NOT_IMPLEMENTED;
2807 
2808 	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
2809 
2810 	/* Return the internal sensor only if ETS is unsupported */
2811 	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2812 	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2813 		return 0;
2814 
2815 	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2816 	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2817 	    != NVM_ETS_TYPE_EMC)
2818 		return E1000_NOT_IMPLEMENTED;
2819 
2820 	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2821 	if (num_sensors > E1000_MAX_SENSORS)
2822 		num_sensors = E1000_MAX_SENSORS;
2823 
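	/* sensor[0] is the on-die sensor read from THMJT above; the external
	 * EMC diode sensors described by the ETS words start at index 1.
	 */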
2824 	for (i = 1; i < num_sensors; i++) {
2825 		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2826 		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2827 				NVM_ETS_DATA_INDEX_SHIFT);
2828 		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2829 				   NVM_ETS_DATA_LOC_SHIFT);
2830 
2831 		if (sensor_location != 0)
2832 			hw->phy.ops.read_i2c_byte(hw,
2833 					e1000_emc_temp_data[sensor_index],
2834 					E1000_I2C_THERMAL_SENSOR_ADDR,
2835 					&data->sensor[i].temp);
2836 	}
2837 	return 0;
2838 }
2839 
2840 /**
2841  *  igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
2842  *  @hw: pointer to hardware structure
2843  *
2844  *  Sets the thermal sensor thresholds according to the NVM map
2845  *  and save off the threshold and location values into mac.thermal_sensor_data
2846  **/
2847 static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2848 {
2849 	u16 ets_offset;
2850 	u16 ets_cfg;
2851 	u16 ets_sensor;
2852 	u8  low_thresh_delta;
2853 	u8  num_sensors;
2854 	u8  sensor_index;
2855 	u8  sensor_location;
2856 	u8  therm_limit;
2857 	u8  i;
2858 	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2859 
2860 	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2861 		return E1000_NOT_IMPLEMENTED;
2862 
2863 	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
2864 
2865 	data->sensor[0].location = 0x1;
2866 	data->sensor[0].caution_thresh =
2867 		(rd32(E1000_THHIGHTC) & 0xFF);
2868 	data->sensor[0].max_op_thresh =
2869 		(rd32(E1000_THLOWTC) & 0xFF);
2870 
2871 	/* Return the internal sensor only if ETS is unsupported */
2872 	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2873 	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2874 		return 0;
2875 
2876 	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2877 	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2878 	    != NVM_ETS_TYPE_EMC)
2879 		return E1000_NOT_IMPLEMENTED;
2880 
2881 	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
2882 			    NVM_ETS_LTHRES_DELTA_SHIFT);
2883 	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2884 
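	/* Program each external sensor's high threshold into the EMC over
	 * I2C, and cache the caution and max-operating thresholds (derived
	 * from the NVM low-threshold delta) in thermal_sensor_data.
	 */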
2885 	for (i = 1; i <= num_sensors; i++) {
2886 		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2887 		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2888 				NVM_ETS_DATA_INDEX_SHIFT);
2889 		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2890 				   NVM_ETS_DATA_LOC_SHIFT);
2891 		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
2892 
2893 		hw->phy.ops.write_i2c_byte(hw,
2894 			e1000_emc_therm_limit[sensor_index],
2895 			E1000_I2C_THERMAL_SENSOR_ADDR,
2896 			therm_limit);
2897 
2898 		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
2899 			data->sensor[i].location = sensor_location;
2900 			data->sensor[i].caution_thresh = therm_limit;
2901 			data->sensor[i].max_op_thresh = therm_limit -
2902 							low_thresh_delta;
2903 		}
2904 	}
2905 	return 0;
2906 }
2907 
2908 #endif
2909 static struct e1000_mac_operations e1000_mac_ops_82575 = {
2910 	.init_hw              = igb_init_hw_82575,
2911 	.check_for_link       = igb_check_for_link_82575,
2912 	.rar_set              = igb_rar_set,
2913 	.read_mac_addr        = igb_read_mac_addr_82575,
2914 	.get_speed_and_duplex = igb_get_link_up_info_82575,
2915 #ifdef CONFIG_IGB_HWMON
2916 	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
2917 	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
2918 #endif
2919 };
2920 
2921 static const struct e1000_phy_operations e1000_phy_ops_82575 = {
2922 	.acquire              = igb_acquire_phy_82575,
2923 	.get_cfg_done         = igb_get_cfg_done_82575,
2924 	.release              = igb_release_phy_82575,
2925 	.write_i2c_byte       = igb_write_i2c_byte,
2926 	.read_i2c_byte        = igb_read_i2c_byte,
2927 };
2928 
2929 static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2930 	.acquire              = igb_acquire_nvm_82575,
2931 	.read                 = igb_read_nvm_eerd,
2932 	.release              = igb_release_nvm_82575,
2933 	.write                = igb_write_nvm_spi,
2934 };
2935 
2936 const struct e1000_info e1000_82575_info = {
2937 	.get_invariants = igb_get_invariants_82575,
2938 	.mac_ops = &e1000_mac_ops_82575,
2939 	.phy_ops = &e1000_phy_ops_82575,
2940 	.nvm_ops = &e1000_nvm_ops_82575,
2941 };
2942 
2943