1 /*******************************************************************************
2 
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2013 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 /* 82562G 10/100 Network Connection
30  * 82562G-2 10/100 Network Connection
31  * 82562GT 10/100 Network Connection
32  * 82562GT-2 10/100 Network Connection
33  * 82562V 10/100 Network Connection
34  * 82562V-2 10/100 Network Connection
35  * 82566DC-2 Gigabit Network Connection
36  * 82566DC Gigabit Network Connection
37  * 82566DM-2 Gigabit Network Connection
38  * 82566DM Gigabit Network Connection
39  * 82566MC Gigabit Network Connection
40  * 82566MM Gigabit Network Connection
41  * 82567LM Gigabit Network Connection
42  * 82567LF Gigabit Network Connection
43  * 82567V Gigabit Network Connection
44  * 82567LM-2 Gigabit Network Connection
45  * 82567LF-2 Gigabit Network Connection
46  * 82567V-2 Gigabit Network Connection
47  * 82567LF-3 Gigabit Network Connection
48  * 82567LM-3 Gigabit Network Connection
49  * 82567LM-4 Gigabit Network Connection
50  * 82577LM Gigabit Network Connection
51  * 82577LC Gigabit Network Connection
52  * 82578DM Gigabit Network Connection
53  * 82578DC Gigabit Network Connection
54  * 82579LM Gigabit Network Connection
55  * 82579V Gigabit Network Connection
56  */
57 
58 #include "e1000.h"
59 
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
		u16 dael       :1; /* bit 2 Direct Access error Log */
		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
		u16 reserved1  :2; /* bit 7:6 Reserved */
		u16 reserved2  :6; /* bit 13:8 Reserved */
		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval; /* raw 16-bit view for whole-register read/write */
};
76 
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo      :1;   /* 0 Flash Cycle Go */
		u16 flcycle    :2;   /* 2:1 Flash Cycle */
		u16 reserved   :5;   /* 7:3 Reserved  */
		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn    :6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval; /* raw 16-bit view for whole-register read/write */
};
89 
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra      :8; /* 0:7 GbE region Read Access */
		u32 grwa      :8; /* 8:15 GbE region Write Access */
		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): the bit-fields above cover 32 bits but regval is
	 * only u16, so regval cannot alias the full register.  Presumably
	 * only hsf_flregacc is used for access - confirm before relying
	 * on regval.
	 */
	u16 regval;
};
100 
/* ICH Flash Protected Region */
union ich8_flash_protected_range {
	struct ich8_pr {
		u32 base:13;     /* 0:12 Protected Range Base */
		u32 reserved1:2; /* 13:14 Reserved */
		u32 rpe:1;       /* 15 Read Protection Enable */
		u32 limit:13;    /* 16:28 Protected Range Limit */
		u32 reserved2:2; /* 29:30 Reserved */
		u32 wpe:1;       /* 31 Write Protection Enable */
	} range;
	u32 regval; /* raw 32-bit view for whole-register read/write */
};
113 
114 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
115 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
116 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
117 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
118 						u32 offset, u8 byte);
119 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
120 					 u8 *data);
121 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
122 					 u16 *data);
123 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
124 					 u8 size, u16 *data);
125 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
126 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
127 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
128 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
129 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
130 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
131 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
132 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
133 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
134 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
135 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
136 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
137 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
138 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
139 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
140 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
141 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
142 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
143 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
144 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
145 
146 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
147 {
148 	return readw(hw->flash_address + reg);
149 }
150 
151 static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
152 {
153 	return readl(hw->flash_address + reg);
154 }
155 
156 static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
157 {
158 	writew(val, hw->flash_address + reg);
159 }
160 
161 static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
162 {
163 	writel(val, hw->flash_address + reg);
164 }
165 
166 #define er16flash(reg)		__er16flash(hw, (reg))
167 #define er32flash(reg)		__er32flash(hw, (reg))
168 #define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
169 #define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))
170 
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  As a last resort, switches the PHY to MDIO slow mode and retries the
 *  ID read.  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val;
	u16 retry_count;

	/* Read PHYSID1/PHYSID2 up to twice; treat 0xFFFF (all ones)
	 * as an invalid read and retry.
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);	/* high word of the PHY ID */

		ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			/* discard the half-built ID and retry both reads */
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		/* ID already known (e.g. resume path) - just compare */
		if (hw->phy.id == phy_id)
			return true;
	} else if (phy_id) {
		/* First valid read - record the ID and revision */
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		return true;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 * The semaphore is dropped around the retry since the
	 * helpers below acquire it themselves.
	 */
	hw->phy.ops.release(hw);
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	if (!ret_val)
		ret_val = e1000e_get_phy_id(hw);
	hw->phy.ops.acquire(hw);

	return !ret_val;
}
223 
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.  Gates automatic PHY configuration, makes sure the
 *  PHY is accessible (toggling LANPHYPC to force the MAC-PHY interconnect
 *  out of SMBus mode when necessary) and finally resets the PHY.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = er32(FWSM);
	s32 ret_val;
	u16 phy_reg;

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		e_dbg("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = er32(CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw)) {
			if (hw->mac.type == e1000_pch_lpt) {
				/* Unforce SMBus mode in PHY */
				e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

				/* Unforce SMBus mode in MAC */
				mac_reg = er32(CTRL_EXT);
				mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
				ew32(CTRL_EXT, mac_reg);
			}
			break;
		}

		/* fall-through */
	case e1000_pchlan:
		/* With valid firmware (ME) on pchlan parts the PHY is
		 * already configured - skip the LANPHYPC toggle.
		 */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			e_dbg("Required LANPHYPC toggle blocked by ME\n");
			break;
		}

		e_dbg("Toggling LANPHYPC\n");

		/* Set Phy Config Counter to 50msec */
		mac_reg = er32(FEXTNVM3);
		mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		ew32(FEXTNVM3, mac_reg);

		if (hw->mac.type == e1000_pch_lpt) {
			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * So ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = er32(CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			ew32(CTRL_EXT, mac_reg);
		}

		/* Toggle LANPHYPC Value bit: assert the override with the
		 * value cleared, flush, hold 10us, then drop the override.
		 */
		mac_reg = er32(CTRL);
		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
		ew32(CTRL, mac_reg);
		e1e_flush();
		udelay(10);
		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
		ew32(CTRL, mac_reg);
		e1e_flush();
		if (hw->mac.type < e1000_pch_lpt) {
			msleep(50);
		} else {
			/* On LPT, poll (up to ~100-200ms) for the LPCD bit
			 * instead of a fixed sleep.
			 */
			u16 count = 20;
			do {
				usleep_range(5000, 10000);
			} while (!(er32(CTRL_EXT) &
				   E1000_CTRL_EXT_LPCD) && count--);
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);

	/* Reset the PHY before any access to it.  Doing so, ensures
	 * that the PHY is in a known good state before we read/write
	 * PHY registers.  The generic reset is sufficient here,
	 * because we haven't determined the PHY type yet.
	 */
	ret_val = e1000e_phy_hw_reset_generic(hw);

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		usleep_range(10000, 20000);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}
352 
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers
 *  for PCH/PCH2/PCH-LPT parts, run the PHY init workarounds, identify
 *  the PHY and hook up the PHY-type-specific operations.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	phy->addr                     = 1;
	phy->reset_delay_us           = 100;

	/* HV-family register access and LPLU routines */
	phy->ops.set_page             = e1000_set_page_igp;
	phy->ops.read_reg             = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page        = e1000_read_phy_reg_page_hv;
	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg            = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page       = e1000_write_phy_reg_page_hv;
	phy->ops.power_up             = e1000_power_up_phy_copper;
	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	/* May also set phy->id as a side effect of probing the PHY */
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	/* Note: default precedes the case labels on purpose - a failed or
	 * implausible ID read on pch2lan/pch_lpt falls through to the MDIO
	 * slow-mode retry below.
	 */
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000e_get_phy_type_from_id(phy->id);

	/* Hook up PHY-type-specific operations */
	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
		    e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000e_phy_sw_reset;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}
434 
435 /**
436  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
437  *  @hw: pointer to the HW structure
438  *
439  *  Initialize family-specific PHY parameters and function pointers.
440  **/
441 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
442 {
443 	struct e1000_phy_info *phy = &hw->phy;
444 	s32 ret_val;
445 	u16 i = 0;
446 
447 	phy->addr			= 1;
448 	phy->reset_delay_us		= 100;
449 
450 	phy->ops.power_up               = e1000_power_up_phy_copper;
451 	phy->ops.power_down             = e1000_power_down_phy_copper_ich8lan;
452 
453 	/* We may need to do this twice - once for IGP and if that fails,
454 	 * we'll set BM func pointers and try again
455 	 */
456 	ret_val = e1000e_determine_phy_address(hw);
457 	if (ret_val) {
458 		phy->ops.write_reg = e1000e_write_phy_reg_bm;
459 		phy->ops.read_reg  = e1000e_read_phy_reg_bm;
460 		ret_val = e1000e_determine_phy_address(hw);
461 		if (ret_val) {
462 			e_dbg("Cannot determine PHY addr. Erroring out\n");
463 			return ret_val;
464 		}
465 	}
466 
467 	phy->id = 0;
468 	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
469 	       (i++ < 100)) {
470 		usleep_range(1000, 2000);
471 		ret_val = e1000e_get_phy_id(hw);
472 		if (ret_val)
473 			return ret_val;
474 	}
475 
476 	/* Verify phy id */
477 	switch (phy->id) {
478 	case IGP03E1000_E_PHY_ID:
479 		phy->type = e1000_phy_igp_3;
480 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
481 		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
482 		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
483 		phy->ops.get_info = e1000e_get_phy_info_igp;
484 		phy->ops.check_polarity = e1000_check_polarity_igp;
485 		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
486 		break;
487 	case IFE_E_PHY_ID:
488 	case IFE_PLUS_E_PHY_ID:
489 	case IFE_C_E_PHY_ID:
490 		phy->type = e1000_phy_ife;
491 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
492 		phy->ops.get_info = e1000_get_phy_info_ife;
493 		phy->ops.check_polarity = e1000_check_polarity_ife;
494 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
495 		break;
496 	case BME1000_E_PHY_ID:
497 		phy->type = e1000_phy_bm;
498 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
499 		phy->ops.read_reg = e1000e_read_phy_reg_bm;
500 		phy->ops.write_reg = e1000e_write_phy_reg_bm;
501 		phy->ops.commit = e1000e_phy_sw_reset;
502 		phy->ops.get_info = e1000e_get_phy_info_m88;
503 		phy->ops.check_polarity = e1000_check_polarity_m88;
504 		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
505 		break;
506 	default:
507 		return -E1000_ERR_PHY;
508 		break;
509 	}
510 
511 	return 0;
512 }
513 
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.  Computes the flash base address and per-bank size from
 *  the GFPREG register and resets the shadow RAM.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		e_dbg("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;
	}

	nvm->type = e1000_nvm_flash_sw;

	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT;
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;

	/* Clear shadow ram: mark every word unmodified with the
	 * all-ones (erased) value.
	 */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	return 0;
}
567 
/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers, including the LED and management-mode ops that differ
 *  between the ICH8/9/10 and PCH generations.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;	/* ICH8 has one fewer RAR entry */
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000e_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000e_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000e_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	/* pch_lpt overrides the pch2lan-or-older RAR settings above */
	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	return 0;
}
647 
648 /**
649  *  __e1000_access_emi_reg_locked - Read/write EMI register
650  *  @hw: pointer to the HW structure
651  *  @addr: EMI address to program
652  *  @data: pointer to value to read/write from/to the EMI address
653  *  @read: boolean flag to indicate read or write
654  *
655  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
656  **/
657 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
658 					 u16 *data, bool read)
659 {
660 	s32 ret_val;
661 
662 	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
663 	if (ret_val)
664 		return ret_val;
665 
666 	if (read)
667 		ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
668 	else
669 		ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
670 
671 	return ret_val;
672 }
673 
/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: pointer to storage for the value read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	return __e1000_access_emi_reg_locked(hw, addr, data, true);
}
686 
/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}
699 
/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 **/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpi_ctrl;

	/* Only the 82579 and I217 PHYs are handled here */
	if ((hw->phy.type != e1000_phy_82579) &&
	    (hw->phy.type != e1000_phy_i217))
		return 0;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		u16 lpa, pcs_status, data;

		/* Save off link partner's EEE ability; the EMI register
		 * offsets differ between the two PHY types.
		 */
		switch (hw->phy.type) {
		case e1000_phy_82579:
			lpa = I82579_EEE_LP_ABILITY;
			pcs_status = I82579_EEE_PCS_STATUS;
			break;
		case e1000_phy_i217:
			lpa = I217_EEE_LP_ABILITY;
			pcs_status = I217_EEE_PCS_STATUS;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto release;
		}
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable.
		 */
		if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			e1e_rphy_locked(hw, MII_LPA, &data);
			if (data & LPA_100FULL)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}

		/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
		ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
		if (ret_val)
			goto release;
	}

	ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
783 
/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gpbs mode.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = er32(FEXTNVM6);
	s32 ret_val = 0;

	if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
		u16 kmrn_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* Save the current K1 configuration so it can be restored
		 * after the FEXTNVM6 update below.
		 */
		ret_val =
		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
						&kmrn_reg);
		if (ret_val)
			goto release;

		/* Temporarily disable K1 while changing the clock request */
		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 kmrn_reg &
						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usleep_range(10, 20);

		/* Keep the PLL clock requested while in 1Gbps mode */
		ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		/* Restore the original K1 configuration */
		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 kmrn_reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
	}

	return ret_val;
}
836 
/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.  Also applies the
 *  family-specific link-up workarounds and (re)configures EEE and
 *  flow control after auto-negotiation.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Work-around I218 hang issue */
	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	if (!link)
		return 0; /* No link detected */

	mac->get_link_status = false;

	/* Apply MAC-type-specific link-up workarounds */
	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* Enable/Disable EEE after link up */
	ret_val = e1000_set_eee_pchlan(hw);
	if (ret_val)
		return ret_val;

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;
}
953 
/**
 *  e1000_get_variants_ich8lan - Initialize part-specific parameters and quirks
 *  @adapter: board private structure
 *
 *  Initializes the MAC, NVM and PHY parameter structures for the detected
 *  part, then applies part-specific feature flags and workarounds on the
 *  adapter.  Returns 0 on success or the error from the first failing
 *  init routine.
 **/
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 rc;

	rc = e1000_init_mac_params_ich8lan(hw);
	if (rc)
		return rc;

	rc = e1000_init_nvm_params_ich8lan(hw);
	if (rc)
		return rc;

	/* PHY init differs between the ICH8/9/10 family and the PCH family */
	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		rc = e1000_init_phy_params_ich8lan(hw);
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		rc = e1000_init_phy_params_pchlan(hw);
		break;
	default:
		break;
	}
	if (rc)
		return rc;

	/* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
	 */
	if ((adapter->hw.phy.type == e1000_phy_ife) ||
	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;

		/* LED blinking is also unavailable on these parts */
		hw->mac.ops.blink_led = NULL;
	}

	if ((adapter->hw.mac.type == e1000_ich8lan) &&
	    (adapter->hw.phy.type != e1000_phy_ife))
		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;

	/* Enable workaround for 82579 w/ ME enabled */
	if ((adapter->hw.mac.type == e1000_pch2lan) &&
	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;

	/* Disable EEE by default until IEEE802.3az spec is finalized */
	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->hw.dev_spec.ich8lan.eee_disable = true;

	return 0;
}
1011 
/* File-scope mutex: serializes NVM accesses across all adapters handled by
 * this driver, not just per-device.
 */
static DEFINE_MUTEX(nvm_mutex);

/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.  Blocks until the
 *  mutex is available; always returns 0.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_lock(&nvm_mutex);

	return 0;
}
1026 
1027 /**
1028  *  e1000_release_nvm_ich8lan - Release NVM mutex
1029  *  @hw: pointer to the HW structure
1030  *
1031  *  Releases the mutex used while performing NVM operations.
1032  **/
1033 static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1034 {
1035 	mutex_unlock(&nvm_mutex);
1036 }
1037 
1038 /**
1039  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1040  *  @hw: pointer to the HW structure
1041  *
1042  *  Acquires the software control flag for performing PHY and select
1043  *  MAC CSR accesses.
1044  **/
1045 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1046 {
1047 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1048 	s32 ret_val = 0;
1049 
1050 	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1051 			     &hw->adapter->state)) {
1052 		e_dbg("contention for Phy access\n");
1053 		return -E1000_ERR_PHY;
1054 	}
1055 
1056 	while (timeout) {
1057 		extcnf_ctrl = er32(EXTCNF_CTRL);
1058 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1059 			break;
1060 
1061 		mdelay(1);
1062 		timeout--;
1063 	}
1064 
1065 	if (!timeout) {
1066 		e_dbg("SW has already locked the resource.\n");
1067 		ret_val = -E1000_ERR_CONFIG;
1068 		goto out;
1069 	}
1070 
1071 	timeout = SW_FLAG_TIMEOUT;
1072 
1073 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1074 	ew32(EXTCNF_CTRL, extcnf_ctrl);
1075 
1076 	while (timeout) {
1077 		extcnf_ctrl = er32(EXTCNF_CTRL);
1078 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1079 			break;
1080 
1081 		mdelay(1);
1082 		timeout--;
1083 	}
1084 
1085 	if (!timeout) {
1086 		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1087 		      er32(FWSM), extcnf_ctrl);
1088 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1089 		ew32(EXTCNF_CTRL, extcnf_ctrl);
1090 		ret_val = -E1000_ERR_CONFIG;
1091 		goto out;
1092 	}
1093 
1094 out:
1095 	if (ret_val)
1096 		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1097 
1098 	return ret_val;
1099 }
1100 
1101 /**
1102  *  e1000_release_swflag_ich8lan - Release software control flag
1103  *  @hw: pointer to the HW structure
1104  *
1105  *  Releases the software control flag for performing PHY and select
1106  *  MAC CSR accesses.
1107  **/
1108 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1109 {
1110 	u32 extcnf_ctrl;
1111 
1112 	extcnf_ctrl = er32(EXTCNF_CTRL);
1113 
1114 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1115 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1116 		ew32(EXTCNF_CTRL, extcnf_ctrl);
1117 	} else {
1118 		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1119 	}
1120 
1121 	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1122 }
1123 
1124 /**
1125  *  e1000_check_mng_mode_ich8lan - Checks management mode
1126  *  @hw: pointer to the HW structure
1127  *
1128  *  This checks if the adapter has any manageability enabled.
1129  *  This is a function pointer entry point only called by read/write
1130  *  routines for the PHY and NVM parts.
1131  **/
1132 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1133 {
1134 	u32 fwsm;
1135 
1136 	fwsm = er32(FWSM);
1137 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1138 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1139 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1140 }
1141 
1142 /**
1143  *  e1000_check_mng_mode_pchlan - Checks management mode
1144  *  @hw: pointer to the HW structure
1145  *
1146  *  This checks if the adapter has iAMT enabled.
1147  *  This is a function pointer entry point only called by read/write
1148  *  routines for the PHY and NVM parts.
1149  **/
1150 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1151 {
1152 	u32 fwsm;
1153 
1154 	fwsm = er32(FWSM);
1155 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1156 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1157 }
1158 
1159 /**
1160  *  e1000_rar_set_pch2lan - Set receive address register
1161  *  @hw: pointer to the HW structure
1162  *  @addr: pointer to the receive address
1163  *  @index: receive address array register
1164  *
1165  *  Sets the receive address array register at index to the address passed
1166  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1167  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1168  *  Use SHRA[0-3] in place of those reserved for ME.
1169  **/
1170 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1171 {
1172 	u32 rar_low, rar_high;
1173 
1174 	/* HW expects these in little endian so we reverse the byte order
1175 	 * from network order (big endian) to little endian
1176 	 */
1177 	rar_low = ((u32)addr[0] |
1178 		   ((u32)addr[1] << 8) |
1179 		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1180 
1181 	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1182 
1183 	/* If MAC address zero, no need to set the AV bit */
1184 	if (rar_low || rar_high)
1185 		rar_high |= E1000_RAH_AV;
1186 
1187 	if (index == 0) {
1188 		ew32(RAL(index), rar_low);
1189 		e1e_flush();
1190 		ew32(RAH(index), rar_high);
1191 		e1e_flush();
1192 		return;
1193 	}
1194 
1195 	if (index < hw->mac.rar_entry_count) {
1196 		s32 ret_val;
1197 
1198 		ret_val = e1000_acquire_swflag_ich8lan(hw);
1199 		if (ret_val)
1200 			goto out;
1201 
1202 		ew32(SHRAL(index - 1), rar_low);
1203 		e1e_flush();
1204 		ew32(SHRAH(index - 1), rar_high);
1205 		e1e_flush();
1206 
1207 		e1000_release_swflag_ich8lan(hw);
1208 
1209 		/* verify the register updates */
1210 		if ((er32(SHRAL(index - 1)) == rar_low) &&
1211 		    (er32(SHRAH(index - 1)) == rar_high))
1212 			return;
1213 
1214 		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1215 		      (index - 1), er32(FWSM));
1216 	}
1217 
1218 out:
1219 	e_dbg("Failed to write receive address at index %d\n", index);
1220 }
1221 
1222 /**
1223  *  e1000_rar_set_pch_lpt - Set receive address registers
1224  *  @hw: pointer to the HW structure
1225  *  @addr: pointer to the receive address
1226  *  @index: receive address array register
1227  *
1228  *  Sets the receive address register array at index to the address passed
1229  *  in by addr. For LPT, RAR[0] is the base address register that is to
1230  *  contain the MAC address. SHRA[0-10] are the shared receive address
1231  *  registers that are shared between the Host and manageability engine (ME).
1232  **/
1233 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1234 {
1235 	u32 rar_low, rar_high;
1236 	u32 wlock_mac;
1237 
1238 	/* HW expects these in little endian so we reverse the byte order
1239 	 * from network order (big endian) to little endian
1240 	 */
1241 	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1242 		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1243 
1244 	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1245 
1246 	/* If MAC address zero, no need to set the AV bit */
1247 	if (rar_low || rar_high)
1248 		rar_high |= E1000_RAH_AV;
1249 
1250 	if (index == 0) {
1251 		ew32(RAL(index), rar_low);
1252 		e1e_flush();
1253 		ew32(RAH(index), rar_high);
1254 		e1e_flush();
1255 		return;
1256 	}
1257 
1258 	/* The manageability engine (ME) can lock certain SHRAR registers that
1259 	 * it is using - those registers are unavailable for use.
1260 	 */
1261 	if (index < hw->mac.rar_entry_count) {
1262 		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1263 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1264 
1265 		/* Check if all SHRAR registers are locked */
1266 		if (wlock_mac == 1)
1267 			goto out;
1268 
1269 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1270 			s32 ret_val;
1271 
1272 			ret_val = e1000_acquire_swflag_ich8lan(hw);
1273 
1274 			if (ret_val)
1275 				goto out;
1276 
1277 			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1278 			e1e_flush();
1279 			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1280 			e1e_flush();
1281 
1282 			e1000_release_swflag_ich8lan(hw);
1283 
1284 			/* verify the register updates */
1285 			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1286 			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1287 				return;
1288 		}
1289 	}
1290 
1291 out:
1292 	e_dbg("Failed to write receive address at index %d\n", index);
1293 }
1294 
1295 /**
1296  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1297  *  @hw: pointer to the HW structure
1298  *
1299  *  Checks if firmware is blocking the reset of the PHY.
1300  *  This is a function pointer entry point only called by
1301  *  reset routines.
1302  **/
1303 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1304 {
1305 	u32 fwsm;
1306 
1307 	fwsm = er32(FWSM);
1308 
1309 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
1310 }
1311 
1312 /**
1313  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1314  *  @hw: pointer to the HW structure
1315  *
1316  *  Assumes semaphore already acquired.
1317  *
1318  **/
1319 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1320 {
1321 	u16 phy_data;
1322 	u32 strap = er32(STRAP);
1323 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1324 	    E1000_STRAP_SMT_FREQ_SHIFT;
1325 	s32 ret_val;
1326 
1327 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1328 
1329 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1330 	if (ret_val)
1331 		return ret_val;
1332 
1333 	phy_data &= ~HV_SMB_ADDR_MASK;
1334 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1335 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1336 
1337 	if (hw->phy.type == e1000_phy_i217) {
1338 		/* Restore SMBus frequency */
1339 		if (freq--) {
1340 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1341 			phy_data |= (freq & (1 << 0)) <<
1342 			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
1343 			phy_data |= (freq & (1 << 1)) <<
1344 			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1345 		} else {
1346 			e_dbg("Unsupported SMB frequency in PHY\n");
1347 		}
1348 	}
1349 
1350 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1351 }
1352 
1353 /**
1354  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1355  *  @hw:   pointer to the HW structure
1356  *
1357  *  SW should configure the LCD from the NVM extended configuration region
1358  *  as a workaround for certain parts.
1359  **/
1360 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1361 {
1362 	struct e1000_phy_info *phy = &hw->phy;
1363 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1364 	s32 ret_val = 0;
1365 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1366 
1367 	/* Initialize the PHY from the NVM on ICH platforms.  This
1368 	 * is needed due to an issue where the NVM configuration is
1369 	 * not properly autoloaded after power transitions.
1370 	 * Therefore, after each PHY reset, we will load the
1371 	 * configuration data out of the NVM manually.
1372 	 */
1373 	switch (hw->mac.type) {
1374 	case e1000_ich8lan:
1375 		if (phy->type != e1000_phy_igp_3)
1376 			return ret_val;
1377 
1378 		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
1379 		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
1380 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1381 			break;
1382 		}
1383 		/* Fall-thru */
1384 	case e1000_pchlan:
1385 	case e1000_pch2lan:
1386 	case e1000_pch_lpt:
1387 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1388 		break;
1389 	default:
1390 		return ret_val;
1391 	}
1392 
1393 	ret_val = hw->phy.ops.acquire(hw);
1394 	if (ret_val)
1395 		return ret_val;
1396 
1397 	data = er32(FEXTNVM);
1398 	if (!(data & sw_cfg_mask))
1399 		goto release;
1400 
1401 	/* Make sure HW does not configure LCD from PHY
1402 	 * extended configuration before SW configuration
1403 	 */
1404 	data = er32(EXTCNF_CTRL);
1405 	if ((hw->mac.type < e1000_pch2lan) &&
1406 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1407 		goto release;
1408 
1409 	cnf_size = er32(EXTCNF_SIZE);
1410 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1411 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1412 	if (!cnf_size)
1413 		goto release;
1414 
1415 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1416 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1417 
1418 	if (((hw->mac.type == e1000_pchlan) &&
1419 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1420 	    (hw->mac.type > e1000_pchlan)) {
1421 		/* HW configures the SMBus address and LEDs when the
1422 		 * OEM and LCD Write Enable bits are set in the NVM.
1423 		 * When both NVM bits are cleared, SW will configure
1424 		 * them instead.
1425 		 */
1426 		ret_val = e1000_write_smbus_addr(hw);
1427 		if (ret_val)
1428 			goto release;
1429 
1430 		data = er32(LEDCTL);
1431 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1432 							(u16)data);
1433 		if (ret_val)
1434 			goto release;
1435 	}
1436 
1437 	/* Configure LCD from extended configuration region. */
1438 
1439 	/* cnf_base_addr is in DWORD */
1440 	word_addr = (u16)(cnf_base_addr << 1);
1441 
1442 	for (i = 0; i < cnf_size; i++) {
1443 		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
1444 					 &reg_data);
1445 		if (ret_val)
1446 			goto release;
1447 
1448 		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
1449 					 1, &reg_addr);
1450 		if (ret_val)
1451 			goto release;
1452 
1453 		/* Save off the PHY page for future writes. */
1454 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1455 			phy_page = reg_data;
1456 			continue;
1457 		}
1458 
1459 		reg_addr &= PHY_REG_MASK;
1460 		reg_addr |= phy_page;
1461 
1462 		ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
1463 		if (ret_val)
1464 			goto release;
1465 	}
1466 
1467 release:
1468 	hw->phy.ops.release(hw);
1469 	return ret_val;
1470 }
1471 
1472 /**
1473  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1474  *  @hw:   pointer to the HW structure
1475  *  @link: link up bool flag
1476  *
1477  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1478  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
1479  *  If link is down, the function will restore the default K1 setting located
1480  *  in the NVM.
1481  **/
1482 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1483 {
1484 	s32 ret_val = 0;
1485 	u16 status_reg = 0;
1486 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1487 
1488 	if (hw->mac.type != e1000_pchlan)
1489 		return 0;
1490 
1491 	/* Wrap the whole flow with the sw flag */
1492 	ret_val = hw->phy.ops.acquire(hw);
1493 	if (ret_val)
1494 		return ret_val;
1495 
1496 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1497 	if (link) {
1498 		if (hw->phy.type == e1000_phy_82578) {
1499 			ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
1500 						  &status_reg);
1501 			if (ret_val)
1502 				goto release;
1503 
1504 			status_reg &= BM_CS_STATUS_LINK_UP |
1505 			              BM_CS_STATUS_RESOLVED |
1506 			              BM_CS_STATUS_SPEED_MASK;
1507 
1508 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1509 			                   BM_CS_STATUS_RESOLVED |
1510 			                   BM_CS_STATUS_SPEED_1000))
1511 				k1_enable = false;
1512 		}
1513 
1514 		if (hw->phy.type == e1000_phy_82577) {
1515 			ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
1516 			if (ret_val)
1517 				goto release;
1518 
1519 			status_reg &= HV_M_STATUS_LINK_UP |
1520 			              HV_M_STATUS_AUTONEG_COMPLETE |
1521 			              HV_M_STATUS_SPEED_MASK;
1522 
1523 			if (status_reg == (HV_M_STATUS_LINK_UP |
1524 			                   HV_M_STATUS_AUTONEG_COMPLETE |
1525 			                   HV_M_STATUS_SPEED_1000))
1526 				k1_enable = false;
1527 		}
1528 
1529 		/* Link stall fix for link up */
1530 		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
1531 		if (ret_val)
1532 			goto release;
1533 
1534 	} else {
1535 		/* Link stall fix for link down */
1536 		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
1537 		if (ret_val)
1538 			goto release;
1539 	}
1540 
1541 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1542 
1543 release:
1544 	hw->phy.ops.release(hw);
1545 
1546 	return ret_val;
1547 }
1548 
1549 /**
1550  *  e1000_configure_k1_ich8lan - Configure K1 power state
1551  *  @hw: pointer to the HW structure
1552  *  @enable: K1 state to configure
1553  *
1554  *  Configure the K1 power state based on the provided parameter.
1555  *  Assumes semaphore already acquired.
1556  *
1557  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1558  **/
1559 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1560 {
1561 	s32 ret_val;
1562 	u32 ctrl_reg = 0;
1563 	u32 ctrl_ext = 0;
1564 	u32 reg = 0;
1565 	u16 kmrn_reg = 0;
1566 
1567 	ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1568 					      &kmrn_reg);
1569 	if (ret_val)
1570 		return ret_val;
1571 
1572 	if (k1_enable)
1573 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1574 	else
1575 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1576 
1577 	ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1578 					       kmrn_reg);
1579 	if (ret_val)
1580 		return ret_val;
1581 
1582 	udelay(20);
1583 	ctrl_ext = er32(CTRL_EXT);
1584 	ctrl_reg = er32(CTRL);
1585 
1586 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1587 	reg |= E1000_CTRL_FRCSPD;
1588 	ew32(CTRL, reg);
1589 
1590 	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1591 	e1e_flush();
1592 	udelay(20);
1593 	ew32(CTRL, ctrl_reg);
1594 	ew32(CTRL_EXT, ctrl_ext);
1595 	e1e_flush();
1596 	udelay(20);
1597 
1598 	return 0;
1599 }
1600 
1601 /**
1602  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1603  *  @hw:       pointer to the HW structure
1604  *  @d0_state: boolean if entering d0 or d3 device state
1605  *
1606  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1607  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1608  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
1609  **/
1610 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1611 {
1612 	s32 ret_val = 0;
1613 	u32 mac_reg;
1614 	u16 oem_reg;
1615 
1616 	if (hw->mac.type < e1000_pchlan)
1617 		return ret_val;
1618 
1619 	ret_val = hw->phy.ops.acquire(hw);
1620 	if (ret_val)
1621 		return ret_val;
1622 
1623 	if (hw->mac.type == e1000_pchlan) {
1624 		mac_reg = er32(EXTCNF_CTRL);
1625 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1626 			goto release;
1627 	}
1628 
1629 	mac_reg = er32(FEXTNVM);
1630 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1631 		goto release;
1632 
1633 	mac_reg = er32(PHY_CTRL);
1634 
1635 	ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
1636 	if (ret_val)
1637 		goto release;
1638 
1639 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1640 
1641 	if (d0_state) {
1642 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1643 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1644 
1645 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1646 			oem_reg |= HV_OEM_BITS_LPLU;
1647 	} else {
1648 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1649 			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1650 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1651 
1652 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1653 			       E1000_PHY_CTRL_NOND0A_LPLU))
1654 			oem_reg |= HV_OEM_BITS_LPLU;
1655 	}
1656 
1657 	/* Set Restart auto-neg to activate the bits */
1658 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1659 	    !hw->phy.ops.check_reset_block(hw))
1660 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1661 
1662 	ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
1663 
1664 release:
1665 	hw->phy.ops.release(hw);
1666 
1667 	return ret_val;
1668 }
1669 
1670 
1671 /**
1672  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1673  *  @hw:   pointer to the HW structure
1674  **/
1675 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1676 {
1677 	s32 ret_val;
1678 	u16 data;
1679 
1680 	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
1681 	if (ret_val)
1682 		return ret_val;
1683 
1684 	data |= HV_KMRN_MDIO_SLOW;
1685 
1686 	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
1687 
1688 	return ret_val;
1689 }
1690 
1691 /**
1692  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1693  *  done after every PHY reset.
1694  **/
1695 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1696 {
1697 	s32 ret_val = 0;
1698 	u16 phy_data;
1699 
1700 	if (hw->mac.type != e1000_pchlan)
1701 		return 0;
1702 
1703 	/* Set MDIO slow mode before any other MDIO access */
1704 	if (hw->phy.type == e1000_phy_82577) {
1705 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
1706 		if (ret_val)
1707 			return ret_val;
1708 	}
1709 
1710 	if (((hw->phy.type == e1000_phy_82577) &&
1711 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1712 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1713 		/* Disable generation of early preamble */
1714 		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
1715 		if (ret_val)
1716 			return ret_val;
1717 
1718 		/* Preamble tuning for SSC */
1719 		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1720 		if (ret_val)
1721 			return ret_val;
1722 	}
1723 
1724 	if (hw->phy.type == e1000_phy_82578) {
1725 		/* Return registers to default by doing a soft reset then
1726 		 * writing 0x3140 to the control register.
1727 		 */
1728 		if (hw->phy.revision < 2) {
1729 			e1000e_phy_sw_reset(hw);
1730 			ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
1731 		}
1732 	}
1733 
1734 	/* Select page 0 */
1735 	ret_val = hw->phy.ops.acquire(hw);
1736 	if (ret_val)
1737 		return ret_val;
1738 
1739 	hw->phy.addr = 1;
1740 	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1741 	hw->phy.ops.release(hw);
1742 	if (ret_val)
1743 		return ret_val;
1744 
1745 	/* Configure the K1 Si workaround during phy reset assuming there is
1746 	 * link so that it disables K1 if link is in 1Gbps.
1747 	 */
1748 	ret_val = e1000_k1_gig_workaround_hv(hw, true);
1749 	if (ret_val)
1750 		return ret_val;
1751 
1752 	/* Workaround for link disconnects on a busy hub in half duplex */
1753 	ret_val = hw->phy.ops.acquire(hw);
1754 	if (ret_val)
1755 		return ret_val;
1756 	ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1757 	if (ret_val)
1758 		goto release;
1759 	ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
1760 	if (ret_val)
1761 		goto release;
1762 
1763 	/* set MSE higher to enable link to stay up when noise is high */
1764 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
1765 release:
1766 	hw->phy.ops.release(hw);
1767 
1768 	return ret_val;
1769 }
1770 
1771 /**
1772  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1773  *  @hw:   pointer to the HW structure
1774  **/
1775 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1776 {
1777 	u32 mac_reg;
1778 	u16 i, phy_reg = 0;
1779 	s32 ret_val;
1780 
1781 	ret_val = hw->phy.ops.acquire(hw);
1782 	if (ret_val)
1783 		return;
1784 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1785 	if (ret_val)
1786 		goto release;
1787 
1788 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1789 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1790 		mac_reg = er32(RAL(i));
1791 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1792 					   (u16)(mac_reg & 0xFFFF));
1793 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1794 					   (u16)((mac_reg >> 16) & 0xFFFF));
1795 
1796 		mac_reg = er32(RAH(i));
1797 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1798 					   (u16)(mac_reg & 0xFFFF));
1799 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1800 					   (u16)((mac_reg & E1000_RAH_AV)
1801 						 >> 16));
1802 	}
1803 
1804 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1805 
1806 release:
1807 	hw->phy.ops.release(hw);
1808 }
1809 
1810 /**
1811  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1812  *  with 82579 PHY
1813  *  @hw: pointer to the HW structure
1814  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
1815  **/
1816 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1817 {
1818 	s32 ret_val = 0;
1819 	u16 phy_reg, data;
1820 	u32 mac_reg;
1821 	u16 i;
1822 
1823 	if (hw->mac.type < e1000_pch2lan)
1824 		return 0;
1825 
1826 	/* disable Rx path while enabling/disabling workaround */
1827 	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1828 	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1829 	if (ret_val)
1830 		return ret_val;
1831 
1832 	if (enable) {
1833 		/* Write Rx addresses (rar_entry_count for RAL/H, +4 for
1834 		 * SHRAL/H) and initial CRC values to the MAC
1835 		 */
1836 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1837 			u8 mac_addr[ETH_ALEN] = {0};
1838 			u32 addr_high, addr_low;
1839 
1840 			addr_high = er32(RAH(i));
1841 			if (!(addr_high & E1000_RAH_AV))
1842 				continue;
1843 			addr_low = er32(RAL(i));
1844 			mac_addr[0] = (addr_low & 0xFF);
1845 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1846 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1847 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1848 			mac_addr[4] = (addr_high & 0xFF);
1849 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1850 
1851 			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
1852 		}
1853 
1854 		/* Write Rx addresses to the PHY */
1855 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1856 
1857 		/* Enable jumbo frame workaround in the MAC */
1858 		mac_reg = er32(FFLT_DBG);
1859 		mac_reg &= ~(1 << 14);
1860 		mac_reg |= (7 << 15);
1861 		ew32(FFLT_DBG, mac_reg);
1862 
1863 		mac_reg = er32(RCTL);
1864 		mac_reg |= E1000_RCTL_SECRC;
1865 		ew32(RCTL, mac_reg);
1866 
1867 		ret_val = e1000e_read_kmrn_reg(hw,
1868 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1869 						&data);
1870 		if (ret_val)
1871 			return ret_val;
1872 		ret_val = e1000e_write_kmrn_reg(hw,
1873 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1874 						data | (1 << 0));
1875 		if (ret_val)
1876 			return ret_val;
1877 		ret_val = e1000e_read_kmrn_reg(hw,
1878 						E1000_KMRNCTRLSTA_HD_CTRL,
1879 						&data);
1880 		if (ret_val)
1881 			return ret_val;
1882 		data &= ~(0xF << 8);
1883 		data |= (0xB << 8);
1884 		ret_val = e1000e_write_kmrn_reg(hw,
1885 						E1000_KMRNCTRLSTA_HD_CTRL,
1886 						data);
1887 		if (ret_val)
1888 			return ret_val;
1889 
1890 		/* Enable jumbo frame workaround in the PHY */
1891 		e1e_rphy(hw, PHY_REG(769, 23), &data);
1892 		data &= ~(0x7F << 5);
1893 		data |= (0x37 << 5);
1894 		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1895 		if (ret_val)
1896 			return ret_val;
1897 		e1e_rphy(hw, PHY_REG(769, 16), &data);
1898 		data &= ~(1 << 13);
1899 		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1900 		if (ret_val)
1901 			return ret_val;
1902 		e1e_rphy(hw, PHY_REG(776, 20), &data);
1903 		data &= ~(0x3FF << 2);
1904 		data |= (0x1A << 2);
1905 		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1906 		if (ret_val)
1907 			return ret_val;
1908 		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
1909 		if (ret_val)
1910 			return ret_val;
1911 		e1e_rphy(hw, HV_PM_CTRL, &data);
1912 		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
1913 		if (ret_val)
1914 			return ret_val;
1915 	} else {
1916 		/* Write MAC register values back to h/w defaults */
1917 		mac_reg = er32(FFLT_DBG);
1918 		mac_reg &= ~(0xF << 14);
1919 		ew32(FFLT_DBG, mac_reg);
1920 
1921 		mac_reg = er32(RCTL);
1922 		mac_reg &= ~E1000_RCTL_SECRC;
1923 		ew32(RCTL, mac_reg);
1924 
1925 		ret_val = e1000e_read_kmrn_reg(hw,
1926 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1927 						&data);
1928 		if (ret_val)
1929 			return ret_val;
1930 		ret_val = e1000e_write_kmrn_reg(hw,
1931 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1932 						data & ~(1 << 0));
1933 		if (ret_val)
1934 			return ret_val;
1935 		ret_val = e1000e_read_kmrn_reg(hw,
1936 						E1000_KMRNCTRLSTA_HD_CTRL,
1937 						&data);
1938 		if (ret_val)
1939 			return ret_val;
1940 		data &= ~(0xF << 8);
1941 		data |= (0xB << 8);
1942 		ret_val = e1000e_write_kmrn_reg(hw,
1943 						E1000_KMRNCTRLSTA_HD_CTRL,
1944 						data);
1945 		if (ret_val)
1946 			return ret_val;
1947 
1948 		/* Write PHY register values back to h/w defaults */
1949 		e1e_rphy(hw, PHY_REG(769, 23), &data);
1950 		data &= ~(0x7F << 5);
1951 		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1952 		if (ret_val)
1953 			return ret_val;
1954 		e1e_rphy(hw, PHY_REG(769, 16), &data);
1955 		data |= (1 << 13);
1956 		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1957 		if (ret_val)
1958 			return ret_val;
1959 		e1e_rphy(hw, PHY_REG(776, 20), &data);
1960 		data &= ~(0x3FF << 2);
1961 		data |= (0x8 << 2);
1962 		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1963 		if (ret_val)
1964 			return ret_val;
1965 		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
1966 		if (ret_val)
1967 			return ret_val;
1968 		e1e_rphy(hw, HV_PM_CTRL, &data);
1969 		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
1970 		if (ret_val)
1971 			return ret_val;
1972 	}
1973 
1974 	/* re-enable Rx path after enabling/disabling workaround */
1975 	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1976 }
1977 
1978 /**
1979  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1980  *  done after every PHY reset.
1981  **/
1982 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1983 {
1984 	s32 ret_val = 0;
1985 
1986 	if (hw->mac.type != e1000_pch2lan)
1987 		return 0;
1988 
1989 	/* Set MDIO slow mode before any other MDIO access */
1990 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
1991 	if (ret_val)
1992 		return ret_val;
1993 
1994 	ret_val = hw->phy.ops.acquire(hw);
1995 	if (ret_val)
1996 		return ret_val;
1997 	/* set MSE higher to enable link to stay up when noise is high */
1998 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
1999 	if (ret_val)
2000 		goto release;
2001 	/* drop link after 5 times MSE threshold was reached */
2002 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2003 release:
2004 	hw->phy.ops.release(hw);
2005 
2006 	return ret_val;
2007 }
2008 
2009 /**
2010  *  e1000_k1_gig_workaround_lv - K1 Si workaround
2011  *  @hw:   pointer to the HW structure
2012  *
2013  *  Workaround to set the K1 beacon duration for 82579 parts
2014  **/
2015 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2016 {
2017 	s32 ret_val = 0;
2018 	u16 status_reg = 0;
2019 	u32 mac_reg;
2020 	u16 phy_reg;
2021 
2022 	if (hw->mac.type != e1000_pch2lan)
2023 		return 0;
2024 
2025 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
2026 	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2027 	if (ret_val)
2028 		return ret_val;
2029 
2030 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2031 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2032 		mac_reg = er32(FEXTNVM4);
2033 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2034 
2035 		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
2036 		if (ret_val)
2037 			return ret_val;
2038 
2039 		if (status_reg & HV_M_STATUS_SPEED_1000) {
2040 			u16 pm_phy_reg;
2041 
2042 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2043 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2044 			/* LV 1G Packet drop issue wa  */
2045 			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2046 			if (ret_val)
2047 				return ret_val;
2048 			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2049 			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2050 			if (ret_val)
2051 				return ret_val;
2052 		} else {
2053 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2054 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2055 		}
2056 		ew32(FEXTNVM4, mac_reg);
2057 		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
2058 	}
2059 
2060 	return ret_val;
2061 }
2062 
2063 /**
2064  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2065  *  @hw:   pointer to the HW structure
2066  *  @gate: boolean set to true to gate, false to ungate
2067  *
2068  *  Gate/ungate the automatic PHY configuration via hardware; perform
2069  *  the configuration via software instead.
2070  **/
2071 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2072 {
2073 	u32 extcnf_ctrl;
2074 
2075 	if (hw->mac.type < e1000_pch2lan)
2076 		return;
2077 
2078 	extcnf_ctrl = er32(EXTCNF_CTRL);
2079 
2080 	if (gate)
2081 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2082 	else
2083 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2084 
2085 	ew32(EXTCNF_CTRL, extcnf_ctrl);
2086 }
2087 
2088 /**
2089  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2090  *  @hw: pointer to the HW structure
2091  *
2092  *  Check the appropriate indication the MAC has finished configuring the
2093  *  PHY after a software reset.
2094  **/
2095 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2096 {
2097 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2098 
2099 	/* Wait for basic configuration completes before proceeding */
2100 	do {
2101 		data = er32(STATUS);
2102 		data &= E1000_STATUS_LAN_INIT_DONE;
2103 		udelay(100);
2104 	} while ((!data) && --loop);
2105 
2106 	/* If basic configuration is incomplete before the above loop
2107 	 * count reaches 0, loading the configuration from NVM will
2108 	 * leave the PHY in a bad state possibly resulting in no link.
2109 	 */
2110 	if (loop == 0)
2111 		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2112 
2113 	/* Clear the Init Done bit for the next init event */
2114 	data = er32(STATUS);
2115 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2116 	ew32(STATUS, data);
2117 }
2118 
2119 /**
2120  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2121  *  @hw: pointer to the HW structure
2122  **/
2123 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2124 {
2125 	s32 ret_val = 0;
2126 	u16 reg;
2127 
2128 	if (hw->phy.ops.check_reset_block(hw))
2129 		return 0;
2130 
2131 	/* Allow time for h/w to get to quiescent state after reset */
2132 	usleep_range(10000, 20000);
2133 
2134 	/* Perform any necessary post-reset workarounds */
2135 	switch (hw->mac.type) {
2136 	case e1000_pchlan:
2137 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2138 		if (ret_val)
2139 			return ret_val;
2140 		break;
2141 	case e1000_pch2lan:
2142 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2143 		if (ret_val)
2144 			return ret_val;
2145 		break;
2146 	default:
2147 		break;
2148 	}
2149 
2150 	/* Clear the host wakeup bit after lcd reset */
2151 	if (hw->mac.type >= e1000_pchlan) {
2152 		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
2153 		reg &= ~BM_WUC_HOST_WU_BIT;
2154 		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
2155 	}
2156 
2157 	/* Configure the LCD with the extended configuration region in NVM */
2158 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2159 	if (ret_val)
2160 		return ret_val;
2161 
2162 	/* Configure the LCD with the OEM bits in NVM */
2163 	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2164 
2165 	if (hw->mac.type == e1000_pch2lan) {
2166 		/* Ungate automatic PHY configuration on non-managed 82579 */
2167 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
2168 			usleep_range(10000, 20000);
2169 			e1000_gate_hw_phy_config_ich8lan(hw, false);
2170 		}
2171 
2172 		/* Set EEE LPI Update Timer to 200usec */
2173 		ret_val = hw->phy.ops.acquire(hw);
2174 		if (ret_val)
2175 			return ret_val;
2176 		ret_val = e1000_write_emi_reg_locked(hw,
2177 						     I82579_LPI_UPDATE_TIMER,
2178 						     0x1387);
2179 		hw->phy.ops.release(hw);
2180 	}
2181 
2182 	return ret_val;
2183 }
2184 
2185 /**
2186  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2187  *  @hw: pointer to the HW structure
2188  *
2189  *  Resets the PHY
2190  *  This is a function pointer entry point called by drivers
2191  *  or other shared routines.
2192  **/
2193 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2194 {
2195 	s32 ret_val = 0;
2196 
2197 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2198 	if ((hw->mac.type == e1000_pch2lan) &&
2199 	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2200 		e1000_gate_hw_phy_config_ich8lan(hw, true);
2201 
2202 	ret_val = e1000e_phy_hw_reset_generic(hw);
2203 	if (ret_val)
2204 		return ret_val;
2205 
2206 	return e1000_post_phy_reset_ich8lan(hw);
2207 }
2208 
2209 /**
2210  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2211  *  @hw: pointer to the HW structure
2212  *  @active: true to enable LPLU, false to disable
2213  *
2214  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2215  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2216  *  the phy speed. This function will manually set the LPLU bit and restart
2217  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2218  *  since it configures the same bit.
2219  **/
2220 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2221 {
2222 	s32 ret_val;
2223 	u16 oem_reg;
2224 
2225 	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2226 	if (ret_val)
2227 		return ret_val;
2228 
2229 	if (active)
2230 		oem_reg |= HV_OEM_BITS_LPLU;
2231 	else
2232 		oem_reg &= ~HV_OEM_BITS_LPLU;
2233 
2234 	if (!hw->phy.ops.check_reset_block(hw))
2235 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2236 
2237 	return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2238 }
2239 
2240 /**
2241  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2242  *  @hw: pointer to the HW structure
2243  *  @active: true to enable LPLU, false to disable
2244  *
2245  *  Sets the LPLU D0 state according to the active flag.  When
2246  *  activating LPLU this function also disables smart speed
2247  *  and vice versa.  LPLU will not be activated unless the
2248  *  device autonegotiation advertisement meets standards of
2249  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2250  *  This is a function pointer entry point only called by
2251  *  PHY setup routines.
2252  **/
2253 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2254 {
2255 	struct e1000_phy_info *phy = &hw->phy;
2256 	u32 phy_ctrl;
2257 	s32 ret_val = 0;
2258 	u16 data;
2259 
2260 	if (phy->type == e1000_phy_ife)
2261 		return 0;
2262 
2263 	phy_ctrl = er32(PHY_CTRL);
2264 
2265 	if (active) {
2266 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2267 		ew32(PHY_CTRL, phy_ctrl);
2268 
2269 		if (phy->type != e1000_phy_igp_3)
2270 			return 0;
2271 
2272 		/* Call gig speed drop workaround on LPLU before accessing
2273 		 * any PHY registers
2274 		 */
2275 		if (hw->mac.type == e1000_ich8lan)
2276 			e1000e_gig_downshift_workaround_ich8lan(hw);
2277 
2278 		/* When LPLU is enabled, we should disable SmartSpeed */
2279 		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2280 		if (ret_val)
2281 			return ret_val;
2282 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2283 		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2284 		if (ret_val)
2285 			return ret_val;
2286 	} else {
2287 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2288 		ew32(PHY_CTRL, phy_ctrl);
2289 
2290 		if (phy->type != e1000_phy_igp_3)
2291 			return 0;
2292 
2293 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2294 		 * during Dx states where the power conservation is most
2295 		 * important.  During driver activity we should enable
2296 		 * SmartSpeed, so performance is maintained.
2297 		 */
2298 		if (phy->smart_speed == e1000_smart_speed_on) {
2299 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2300 					   &data);
2301 			if (ret_val)
2302 				return ret_val;
2303 
2304 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2305 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2306 					   data);
2307 			if (ret_val)
2308 				return ret_val;
2309 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2310 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2311 					   &data);
2312 			if (ret_val)
2313 				return ret_val;
2314 
2315 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2316 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2317 					   data);
2318 			if (ret_val)
2319 				return ret_val;
2320 		}
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 /**
2327  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2328  *  @hw: pointer to the HW structure
2329  *  @active: true to enable LPLU, false to disable
2330  *
2331  *  Sets the LPLU D3 state according to the active flag.  When
2332  *  activating LPLU this function also disables smart speed
2333  *  and vice versa.  LPLU will not be activated unless the
2334  *  device autonegotiation advertisement meets standards of
2335  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2336  *  This is a function pointer entry point only called by
2337  *  PHY setup routines.
2338  **/
2339 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2340 {
2341 	struct e1000_phy_info *phy = &hw->phy;
2342 	u32 phy_ctrl;
2343 	s32 ret_val = 0;
2344 	u16 data;
2345 
2346 	phy_ctrl = er32(PHY_CTRL);
2347 
2348 	if (!active) {
2349 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2350 		ew32(PHY_CTRL, phy_ctrl);
2351 
2352 		if (phy->type != e1000_phy_igp_3)
2353 			return 0;
2354 
2355 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2356 		 * during Dx states where the power conservation is most
2357 		 * important.  During driver activity we should enable
2358 		 * SmartSpeed, so performance is maintained.
2359 		 */
2360 		if (phy->smart_speed == e1000_smart_speed_on) {
2361 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2362 					   &data);
2363 			if (ret_val)
2364 				return ret_val;
2365 
2366 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2367 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2368 					   data);
2369 			if (ret_val)
2370 				return ret_val;
2371 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2372 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2373 					   &data);
2374 			if (ret_val)
2375 				return ret_val;
2376 
2377 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2378 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2379 					   data);
2380 			if (ret_val)
2381 				return ret_val;
2382 		}
2383 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2384 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2385 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2386 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2387 		ew32(PHY_CTRL, phy_ctrl);
2388 
2389 		if (phy->type != e1000_phy_igp_3)
2390 			return 0;
2391 
2392 		/* Call gig speed drop workaround on LPLU before accessing
2393 		 * any PHY registers
2394 		 */
2395 		if (hw->mac.type == e1000_ich8lan)
2396 			e1000e_gig_downshift_workaround_ich8lan(hw);
2397 
2398 		/* When LPLU is enabled, we should disable SmartSpeed */
2399 		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2400 		if (ret_val)
2401 			return ret_val;
2402 
2403 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2404 		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2405 	}
2406 
2407 	return ret_val;
2408 }
2409 
2410 /**
2411  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2412  *  @hw: pointer to the HW structure
2413  *  @bank:  pointer to the variable that returns the active bank
2414  *
2415  *  Reads signature byte from the NVM using the flash access registers.
2416  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2417  **/
2418 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2419 {
2420 	u32 eecd;
2421 	struct e1000_nvm_info *nvm = &hw->nvm;
2422 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2423 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2424 	u8 sig_byte = 0;
2425 	s32 ret_val;
2426 
2427 	switch (hw->mac.type) {
2428 	case e1000_ich8lan:
2429 	case e1000_ich9lan:
2430 		eecd = er32(EECD);
2431 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2432 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2433 			if (eecd & E1000_EECD_SEC1VAL)
2434 				*bank = 1;
2435 			else
2436 				*bank = 0;
2437 
2438 			return 0;
2439 		}
2440 		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2441 		/* fall-thru */
2442 	default:
2443 		/* set bank to 0 in case flash read fails */
2444 		*bank = 0;
2445 
2446 		/* Check bank 0 */
2447 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2448 		                                        &sig_byte);
2449 		if (ret_val)
2450 			return ret_val;
2451 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2452 		    E1000_ICH_NVM_SIG_VALUE) {
2453 			*bank = 0;
2454 			return 0;
2455 		}
2456 
2457 		/* Check bank 1 */
2458 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2459 		                                        bank1_offset,
2460 		                                        &sig_byte);
2461 		if (ret_val)
2462 			return ret_val;
2463 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2464 		    E1000_ICH_NVM_SIG_VALUE) {
2465 			*bank = 1;
2466 			return 0;
2467 		}
2468 
2469 		e_dbg("ERROR: No valid NVM bank present\n");
2470 		return -E1000_ERR_NVM;
2471 	}
2472 }
2473 
2474 /**
2475  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2476  *  @hw: pointer to the HW structure
2477  *  @offset: The offset (in bytes) of the word(s) to read.
2478  *  @words: Size of data to read in words
2479  *  @data: Pointer to the word(s) to read at offset.
2480  *
2481  *  Reads a word(s) from the NVM using the flash access registers.
2482  **/
2483 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2484 				  u16 *data)
2485 {
2486 	struct e1000_nvm_info *nvm = &hw->nvm;
2487 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2488 	u32 act_offset;
2489 	s32 ret_val = 0;
2490 	u32 bank = 0;
2491 	u16 i, word;
2492 
2493 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2494 	    (words == 0)) {
2495 		e_dbg("nvm parameter(s) out of bounds\n");
2496 		ret_val = -E1000_ERR_NVM;
2497 		goto out;
2498 	}
2499 
2500 	nvm->ops.acquire(hw);
2501 
2502 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2503 	if (ret_val) {
2504 		e_dbg("Could not detect valid bank, assuming bank 0\n");
2505 		bank = 0;
2506 	}
2507 
2508 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2509 	act_offset += offset;
2510 
2511 	ret_val = 0;
2512 	for (i = 0; i < words; i++) {
2513 		if (dev_spec->shadow_ram[offset+i].modified) {
2514 			data[i] = dev_spec->shadow_ram[offset+i].value;
2515 		} else {
2516 			ret_val = e1000_read_flash_word_ich8lan(hw,
2517 								act_offset + i,
2518 								&word);
2519 			if (ret_val)
2520 				break;
2521 			data[i] = word;
2522 		}
2523 	}
2524 
2525 	nvm->ops.release(hw);
2526 
2527 out:
2528 	if (ret_val)
2529 		e_dbg("NVM read error: %d\n", ret_val);
2530 
2531 	return ret_val;
2532 }
2533 
2534 /**
2535  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2536  *  @hw: pointer to the HW structure
2537  *
2538  *  This function does initial flash setup so that a new read/write/erase cycle
2539  *  can be started.
2540  **/
2541 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2542 {
2543 	union ich8_hws_flash_status hsfsts;
2544 	s32 ret_val = -E1000_ERR_NVM;
2545 
2546 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2547 
2548 	/* Check if the flash descriptor is valid */
2549 	if (!hsfsts.hsf_status.fldesvalid) {
2550 		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
2551 		return -E1000_ERR_NVM;
2552 	}
2553 
2554 	/* Clear FCERR and DAEL in hw status by writing 1 */
2555 	hsfsts.hsf_status.flcerr = 1;
2556 	hsfsts.hsf_status.dael = 1;
2557 
2558 	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2559 
2560 	/* Either we should have a hardware SPI cycle in progress
2561 	 * bit to check against, in order to start a new cycle or
2562 	 * FDONE bit should be changed in the hardware so that it
2563 	 * is 1 after hardware reset, which can then be used as an
2564 	 * indication whether a cycle is in progress or has been
2565 	 * completed.
2566 	 */
2567 
2568 	if (!hsfsts.hsf_status.flcinprog) {
2569 		/* There is no cycle running at present,
2570 		 * so we can start a cycle.
2571 		 * Begin by setting Flash Cycle Done.
2572 		 */
2573 		hsfsts.hsf_status.flcdone = 1;
2574 		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2575 		ret_val = 0;
2576 	} else {
2577 		s32 i;
2578 
2579 		/* Otherwise poll for sometime so the current
2580 		 * cycle has a chance to end before giving up.
2581 		 */
2582 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2583 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2584 			if (!hsfsts.hsf_status.flcinprog) {
2585 				ret_val = 0;
2586 				break;
2587 			}
2588 			udelay(1);
2589 		}
2590 		if (!ret_val) {
2591 			/* Successful in waiting for previous cycle to timeout,
2592 			 * now set the Flash Cycle Done.
2593 			 */
2594 			hsfsts.hsf_status.flcdone = 1;
2595 			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2596 		} else {
2597 			e_dbg("Flash controller busy, cannot get access\n");
2598 		}
2599 	}
2600 
2601 	return ret_val;
2602 }
2603 
2604 /**
2605  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2606  *  @hw: pointer to the HW structure
2607  *  @timeout: maximum time to wait for completion
2608  *
2609  *  This function starts a flash cycle and waits for its completion.
2610  **/
2611 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2612 {
2613 	union ich8_hws_flash_ctrl hsflctl;
2614 	union ich8_hws_flash_status hsfsts;
2615 	u32 i = 0;
2616 
2617 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2618 	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2619 	hsflctl.hsf_ctrl.flcgo = 1;
2620 	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2621 
2622 	/* wait till FDONE bit is set to 1 */
2623 	do {
2624 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2625 		if (hsfsts.hsf_status.flcdone)
2626 			break;
2627 		udelay(1);
2628 	} while (i++ < timeout);
2629 
2630 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2631 		return 0;
2632 
2633 	return -E1000_ERR_NVM;
2634 }
2635 
2636 /**
2637  *  e1000_read_flash_word_ich8lan - Read word from flash
2638  *  @hw: pointer to the HW structure
2639  *  @offset: offset to data location
2640  *  @data: pointer to the location for storing the data
2641  *
2642  *  Reads the flash word at offset into data.  Offset is converted
2643  *  to bytes before read.
2644  **/
2645 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2646 					 u16 *data)
2647 {
2648 	/* Must convert offset into bytes. */
2649 	offset <<= 1;
2650 
2651 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2652 }
2653 
2654 /**
2655  *  e1000_read_flash_byte_ich8lan - Read byte from flash
2656  *  @hw: pointer to the HW structure
2657  *  @offset: The offset of the byte to read.
2658  *  @data: Pointer to a byte to store the value read.
2659  *
2660  *  Reads a single byte from the NVM using the flash access registers.
2661  **/
2662 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2663 					 u8 *data)
2664 {
2665 	s32 ret_val;
2666 	u16 word = 0;
2667 
2668 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2669 	if (ret_val)
2670 		return ret_val;
2671 
2672 	*data = (u8)word;
2673 
2674 	return 0;
2675 }
2676 
2677 /**
2678  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
2679  *  @hw: pointer to the HW structure
2680  *  @offset: The offset (in bytes) of the byte or word to read.
2681  *  @size: Size of data to read, 1=byte 2=word
2682  *  @data: Pointer to the word to store the value read.
2683  *
2684  *  Reads a byte or word from the NVM using the flash access registers.
2685  **/
2686 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2687 					 u8 size, u16 *data)
2688 {
2689 	union ich8_hws_flash_status hsfsts;
2690 	union ich8_hws_flash_ctrl hsflctl;
2691 	u32 flash_linear_addr;
2692 	u32 flash_data = 0;
2693 	s32 ret_val = -E1000_ERR_NVM;
2694 	u8 count = 0;
2695 
2696 	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2697 		return -E1000_ERR_NVM;
2698 
2699 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2700 			    hw->nvm.flash_base_addr;
2701 
2702 	do {
2703 		udelay(1);
2704 		/* Steps */
2705 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2706 		if (ret_val)
2707 			break;
2708 
2709 		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2710 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2711 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2712 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2713 		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2714 
2715 		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2716 
2717 		ret_val = e1000_flash_cycle_ich8lan(hw,
2718 						ICH_FLASH_READ_COMMAND_TIMEOUT);
2719 
2720 		/* Check if FCERR is set to 1, if set to 1, clear it
2721 		 * and try the whole sequence a few more times, else
2722 		 * read in (shift in) the Flash Data0, the order is
2723 		 * least significant byte first msb to lsb
2724 		 */
2725 		if (!ret_val) {
2726 			flash_data = er32flash(ICH_FLASH_FDATA0);
2727 			if (size == 1)
2728 				*data = (u8)(flash_data & 0x000000FF);
2729 			else if (size == 2)
2730 				*data = (u16)(flash_data & 0x0000FFFF);
2731 			break;
2732 		} else {
2733 			/* If we've gotten here, then things are probably
2734 			 * completely hosed, but if the error condition is
2735 			 * detected, it won't hurt to give it another try...
2736 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2737 			 */
2738 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2739 			if (hsfsts.hsf_status.flcerr) {
2740 				/* Repeat for some time before giving up. */
2741 				continue;
2742 			} else if (!hsfsts.hsf_status.flcdone) {
2743 				e_dbg("Timeout error - flash cycle did not complete.\n");
2744 				break;
2745 			}
2746 		}
2747 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2748 
2749 	return ret_val;
2750 }
2751 
2752 /**
2753  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
2754  *  @hw: pointer to the HW structure
2755  *  @offset: The offset (in bytes) of the word(s) to write.
2756  *  @words: Size of data to write in words
2757  *  @data: Pointer to the word(s) to write at offset.
2758  *
2759  *  Writes a byte or word to the NVM using the flash access registers.
2760  **/
2761 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2762 				   u16 *data)
2763 {
2764 	struct e1000_nvm_info *nvm = &hw->nvm;
2765 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2766 	u16 i;
2767 
2768 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2769 	    (words == 0)) {
2770 		e_dbg("nvm parameter(s) out of bounds\n");
2771 		return -E1000_ERR_NVM;
2772 	}
2773 
2774 	nvm->ops.acquire(hw);
2775 
2776 	for (i = 0; i < words; i++) {
2777 		dev_spec->shadow_ram[offset+i].modified = true;
2778 		dev_spec->shadow_ram[offset+i].value = data[i];
2779 	}
2780 
2781 	nvm->ops.release(hw);
2782 
2783 	return 0;
2784 }
2785 
2786 /**
2787  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2788  *  @hw: pointer to the HW structure
2789  *
2790  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
2791  *  which writes the checksum to the shadow ram.  The changes in the shadow
2792  *  ram are then committed to the EEPROM by processing each bank at a time
2793  *  checking for the modified bit and writing only the pending changes.
2794  *  After a successful commit, the shadow ram is cleared and is ready for
2795  *  future writes.
2796  **/
2797 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2798 {
2799 	struct e1000_nvm_info *nvm = &hw->nvm;
2800 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2801 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2802 	s32 ret_val;
2803 	u16 data;
2804 
2805 	ret_val = e1000e_update_nvm_checksum_generic(hw);
2806 	if (ret_val)
2807 		goto out;
2808 
2809 	if (nvm->type != e1000_nvm_flash_sw)
2810 		goto out;
2811 
2812 	nvm->ops.acquire(hw);
2813 
2814 	/* We're writing to the opposite bank so if we're on bank 1,
2815 	 * write to bank 0 etc.  We also need to erase the segment that
2816 	 * is going to be written
2817 	 */
2818 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2819 	if (ret_val) {
2820 		e_dbg("Could not detect valid bank, assuming bank 0\n");
2821 		bank = 0;
2822 	}
2823 
2824 	if (bank == 0) {
2825 		new_bank_offset = nvm->flash_bank_size;
2826 		old_bank_offset = 0;
2827 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2828 		if (ret_val)
2829 			goto release;
2830 	} else {
2831 		old_bank_offset = nvm->flash_bank_size;
2832 		new_bank_offset = 0;
2833 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2834 		if (ret_val)
2835 			goto release;
2836 	}
2837 
2838 	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2839 		/* Determine whether to write the value stored
2840 		 * in the other NVM bank or a modified value stored
2841 		 * in the shadow RAM
2842 		 */
2843 		if (dev_spec->shadow_ram[i].modified) {
2844 			data = dev_spec->shadow_ram[i].value;
2845 		} else {
2846 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
2847 			                                        old_bank_offset,
2848 			                                        &data);
2849 			if (ret_val)
2850 				break;
2851 		}
2852 
2853 		/* If the word is 0x13, then make sure the signature bits
2854 		 * (15:14) are 11b until the commit has completed.
2855 		 * This will allow us to write 10b which indicates the
2856 		 * signature is valid.  We want to do this after the write
2857 		 * has completed so that we don't mark the segment valid
2858 		 * while the write is still in progress
2859 		 */
2860 		if (i == E1000_ICH_NVM_SIG_WORD)
2861 			data |= E1000_ICH_NVM_SIG_MASK;
2862 
2863 		/* Convert offset to bytes. */
2864 		act_offset = (i + new_bank_offset) << 1;
2865 
2866 		udelay(100);
2867 		/* Write the bytes to the new bank. */
2868 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2869 							       act_offset,
2870 							       (u8)data);
2871 		if (ret_val)
2872 			break;
2873 
2874 		udelay(100);
2875 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2876 							  act_offset + 1,
2877 							  (u8)(data >> 8));
2878 		if (ret_val)
2879 			break;
2880 	}
2881 
2882 	/* Don't bother writing the segment valid bits if sector
2883 	 * programming failed.
2884 	 */
2885 	if (ret_val) {
2886 		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
2887 		e_dbg("Flash commit failed.\n");
2888 		goto release;
2889 	}
2890 
2891 	/* Finally validate the new segment by setting bit 15:14
2892 	 * to 10b in word 0x13 , this can be done without an
2893 	 * erase as well since these bits are 11 to start with
2894 	 * and we need to change bit 14 to 0b
2895 	 */
2896 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2897 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2898 	if (ret_val)
2899 		goto release;
2900 
2901 	data &= 0xBFFF;
2902 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2903 						       act_offset * 2 + 1,
2904 						       (u8)(data >> 8));
2905 	if (ret_val)
2906 		goto release;
2907 
2908 	/* And invalidate the previously valid segment by setting
2909 	 * its signature word (0x13) high_byte to 0b. This can be
2910 	 * done without an erase because flash erase sets all bits
2911 	 * to 1's. We can write 1's to 0's without an erase
2912 	 */
2913 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2914 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2915 	if (ret_val)
2916 		goto release;
2917 
2918 	/* Great!  Everything worked, we can now clear the cached entries. */
2919 	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2920 		dev_spec->shadow_ram[i].modified = false;
2921 		dev_spec->shadow_ram[i].value = 0xFFFF;
2922 	}
2923 
2924 release:
2925 	nvm->ops.release(hw);
2926 
2927 	/* Reload the EEPROM, or else modifications will not appear
2928 	 * until after the next adapter reset.
2929 	 */
2930 	if (!ret_val) {
2931 		nvm->ops.reload(hw);
2932 		usleep_range(10000, 20000);
2933 	}
2934 
2935 out:
2936 	if (ret_val)
2937 		e_dbg("NVM update error: %d\n", ret_val);
2938 
2939 	return ret_val;
2940 }
2941 
2942 /**
2943  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2944  *  @hw: pointer to the HW structure
2945  *
2946  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2947  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
2948  *  calculated, in which case we need to calculate the checksum and set bit 6.
2949  **/
2950 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2951 {
2952 	s32 ret_val;
2953 	u16 data;
2954 	u16 word;
2955 	u16 valid_csum_mask;
2956 
2957 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
2958 	 * the checksum needs to be fixed.  This bit is an indication that
2959 	 * the NVM was prepared by OEM software and did not calculate
2960 	 * the checksum...a likely scenario.
2961 	 */
2962 	switch (hw->mac.type) {
2963 	case e1000_pch_lpt:
2964 		word = NVM_COMPAT;
2965 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
2966 		break;
2967 	default:
2968 		word = NVM_FUTURE_INIT_WORD1;
2969 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
2970 		break;
2971 	}
2972 
2973 	ret_val = e1000_read_nvm(hw, word, 1, &data);
2974 	if (ret_val)
2975 		return ret_val;
2976 
2977 	if (!(data & valid_csum_mask)) {
2978 		data |= valid_csum_mask;
2979 		ret_val = e1000_write_nvm(hw, word, 1, &data);
2980 		if (ret_val)
2981 			return ret_val;
2982 		ret_val = e1000e_update_nvm_checksum(hw);
2983 		if (ret_val)
2984 			return ret_val;
2985 	}
2986 
2987 	return e1000e_validate_nvm_checksum_generic(hw);
2988 }
2989 
2990 /**
2991  *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
2992  *  @hw: pointer to the HW structure
2993  *
2994  *  To prevent malicious write/erase of the NVM, set it to be read-only
2995  *  so that the hardware ignores all write/erase cycles of the NVM via
2996  *  the flash control registers.  The shadow-ram copy of the NVM will
2997  *  still be updated, however any updates to this copy will not stick
2998  *  across driver reloads.
2999  **/
3000 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
3001 {
3002 	struct e1000_nvm_info *nvm = &hw->nvm;
3003 	union ich8_flash_protected_range pr0;
3004 	union ich8_hws_flash_status hsfsts;
3005 	u32 gfpreg;
3006 
3007 	nvm->ops.acquire(hw);
3008 
3009 	gfpreg = er32flash(ICH_FLASH_GFPREG);
3010 
3011 	/* Write-protect GbE Sector of NVM */
3012 	pr0.regval = er32flash(ICH_FLASH_PR0);
3013 	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
3014 	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
3015 	pr0.range.wpe = true;
3016 	ew32flash(ICH_FLASH_PR0, pr0.regval);
3017 
3018 	/* Lock down a subset of GbE Flash Control Registers, e.g.
3019 	 * PR0 to prevent the write-protection from being lifted.
3020 	 * Once FLOCKDN is set, the registers protected by it cannot
3021 	 * be written until FLOCKDN is cleared by a hardware reset.
3022 	 */
3023 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3024 	hsfsts.hsf_status.flockdn = true;
3025 	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3026 
3027 	nvm->ops.release(hw);
3028 }
3029 
3030 /**
3031  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3032  *  @hw: pointer to the HW structure
3033  *  @offset: The offset (in bytes) of the byte/word to read.
3034  *  @size: Size of data to read, 1=byte 2=word
3035  *  @data: The byte(s) to write to the NVM.
3036  *
3037  *  Writes one/two bytes to the NVM using the flash access registers.
3038  **/
3039 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3040 					  u8 size, u16 data)
3041 {
3042 	union ich8_hws_flash_status hsfsts;
3043 	union ich8_hws_flash_ctrl hsflctl;
3044 	u32 flash_linear_addr;
3045 	u32 flash_data = 0;
3046 	s32 ret_val;
3047 	u8 count = 0;
3048 
3049 	if (size < 1 || size > 2 || data > size * 0xff ||
3050 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
3051 		return -E1000_ERR_NVM;
3052 
3053 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3054 			    hw->nvm.flash_base_addr;
3055 
3056 	do {
3057 		udelay(1);
3058 		/* Steps */
3059 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3060 		if (ret_val)
3061 			break;
3062 
3063 		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3064 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3065 		hsflctl.hsf_ctrl.fldbcount = size -1;
3066 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3067 		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3068 
3069 		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3070 
3071 		if (size == 1)
3072 			flash_data = (u32)data & 0x00FF;
3073 		else
3074 			flash_data = (u32)data;
3075 
3076 		ew32flash(ICH_FLASH_FDATA0, flash_data);
3077 
3078 		/* check if FCERR is set to 1 , if set to 1, clear it
3079 		 * and try the whole sequence a few more times else done
3080 		 */
3081 		ret_val = e1000_flash_cycle_ich8lan(hw,
3082 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3083 		if (!ret_val)
3084 			break;
3085 
3086 		/* If we're here, then things are most likely
3087 		 * completely hosed, but if the error condition
3088 		 * is detected, it won't hurt to give it another
3089 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3090 		 */
3091 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3092 		if (hsfsts.hsf_status.flcerr)
3093 			/* Repeat for some time before giving up. */
3094 			continue;
3095 		if (!hsfsts.hsf_status.flcdone) {
3096 			e_dbg("Timeout error - flash cycle did not complete.\n");
3097 			break;
3098 		}
3099 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3100 
3101 	return ret_val;
3102 }
3103 
3104 /**
3105  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3106  *  @hw: pointer to the HW structure
3107  *  @offset: The index of the byte to read.
3108  *  @data: The byte to write to the NVM.
3109  *
3110  *  Writes a single byte to the NVM using the flash access registers.
3111  **/
3112 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3113 					  u8 data)
3114 {
3115 	u16 word = (u16)data;
3116 
3117 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3118 }
3119 
3120 /**
3121  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3122  *  @hw: pointer to the HW structure
3123  *  @offset: The offset of the byte to write.
3124  *  @byte: The byte to write to the NVM.
3125  *
3126  *  Writes a single byte to the NVM using the flash access registers.
3127  *  Goes through a retry algorithm before giving up.
3128  **/
3129 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3130 						u32 offset, u8 byte)
3131 {
3132 	s32 ret_val;
3133 	u16 program_retries;
3134 
3135 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3136 	if (!ret_val)
3137 		return ret_val;
3138 
3139 	for (program_retries = 0; program_retries < 100; program_retries++) {
3140 		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
3141 		udelay(100);
3142 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3143 		if (!ret_val)
3144 			break;
3145 	}
3146 	if (program_retries == 100)
3147 		return -E1000_ERR_NVM;
3148 
3149 	return 0;
3150 }
3151 
3152 /**
3153  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3154  *  @hw: pointer to the HW structure
3155  *  @bank: 0 for first bank, 1 for second bank, etc.
3156  *
3157  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3158  *  bank N is 4096 * N + flash_reg_addr.
3159  **/
3160 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3161 {
3162 	struct e1000_nvm_info *nvm = &hw->nvm;
3163 	union ich8_hws_flash_status hsfsts;
3164 	union ich8_hws_flash_ctrl hsflctl;
3165 	u32 flash_linear_addr;
3166 	/* bank size is in 16bit words - adjust to bytes */
3167 	u32 flash_bank_size = nvm->flash_bank_size * 2;
3168 	s32 ret_val;
3169 	s32 count = 0;
3170 	s32 j, iteration, sector_size;
3171 
3172 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3173 
3174 	/* Determine HW Sector size: Read BERASE bits of hw flash status
3175 	 * register
3176 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3177 	 *     consecutive sectors.  The start index for the nth Hw sector
3178 	 *     can be calculated as = bank * 4096 + n * 256
3179 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3180 	 *     The start index for the nth Hw sector can be calculated
3181 	 *     as = bank * 4096
3182 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3183 	 *     (ich9 only, otherwise error condition)
3184 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3185 	 */
3186 	switch (hsfsts.hsf_status.berasesz) {
3187 	case 0:
3188 		/* Hw sector size 256 */
3189 		sector_size = ICH_FLASH_SEG_SIZE_256;
3190 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3191 		break;
3192 	case 1:
3193 		sector_size = ICH_FLASH_SEG_SIZE_4K;
3194 		iteration = 1;
3195 		break;
3196 	case 2:
3197 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3198 		iteration = 1;
3199 		break;
3200 	case 3:
3201 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3202 		iteration = 1;
3203 		break;
3204 	default:
3205 		return -E1000_ERR_NVM;
3206 	}
3207 
3208 	/* Start with the base address, then add the sector offset. */
3209 	flash_linear_addr = hw->nvm.flash_base_addr;
3210 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3211 
3212 	for (j = 0; j < iteration ; j++) {
3213 		do {
3214 			/* Steps */
3215 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3216 			if (ret_val)
3217 				return ret_val;
3218 
3219 			/* Write a value 11 (block Erase) in Flash
3220 			 * Cycle field in hw flash control
3221 			 */
3222 			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3223 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3224 			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3225 
3226 			/* Write the last 24 bits of an index within the
3227 			 * block into Flash Linear address field in Flash
3228 			 * Address.
3229 			 */
3230 			flash_linear_addr += (j * sector_size);
3231 			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3232 
3233 			ret_val = e1000_flash_cycle_ich8lan(hw,
3234 					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3235 			if (!ret_val)
3236 				break;
3237 
3238 			/* Check if FCERR is set to 1.  If 1,
3239 			 * clear it and try the whole sequence
3240 			 * a few more times else Done
3241 			 */
3242 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3243 			if (hsfsts.hsf_status.flcerr)
3244 				/* repeat for some time before giving up */
3245 				continue;
3246 			else if (!hsfsts.hsf_status.flcdone)
3247 				return ret_val;
3248 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3249 	}
3250 
3251 	return 0;
3252 }
3253 
3254 /**
3255  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3256  *  @hw: pointer to the HW structure
3257  *  @data: Pointer to the LED settings
3258  *
3259  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3260  *  settings is all 0's or F's, set the LED default to a valid LED default
3261  *  setting.
3262  **/
3263 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3264 {
3265 	s32 ret_val;
3266 
3267 	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
3268 	if (ret_val) {
3269 		e_dbg("NVM Read Error\n");
3270 		return ret_val;
3271 	}
3272 
3273 	if (*data == ID_LED_RESERVED_0000 ||
3274 	    *data == ID_LED_RESERVED_FFFF)
3275 		*data = ID_LED_DEFAULT_ICH8LAN;
3276 
3277 	return 0;
3278 }
3279 
3280 /**
3281  *  e1000_id_led_init_pchlan - store LED configurations
3282  *  @hw: pointer to the HW structure
3283  *
3284  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3285  *  the PHY LED configuration register.
3286  *
3287  *  PCH also does not have an "always on" or "always off" mode which
3288  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3289  *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
3290  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3291  *  link based on logic in e1000_led_[on|off]_pchlan().
3292  **/
3293 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3294 {
3295 	struct e1000_mac_info *mac = &hw->mac;
3296 	s32 ret_val;
3297 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3298 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3299 	u16 data, i, temp, shift;
3300 
3301 	/* Get default ID LED modes */
3302 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3303 	if (ret_val)
3304 		return ret_val;
3305 
3306 	mac->ledctl_default = er32(LEDCTL);
3307 	mac->ledctl_mode1 = mac->ledctl_default;
3308 	mac->ledctl_mode2 = mac->ledctl_default;
3309 
3310 	for (i = 0; i < 4; i++) {
3311 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3312 		shift = (i * 5);
3313 		switch (temp) {
3314 		case ID_LED_ON1_DEF2:
3315 		case ID_LED_ON1_ON2:
3316 		case ID_LED_ON1_OFF2:
3317 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3318 			mac->ledctl_mode1 |= (ledctl_on << shift);
3319 			break;
3320 		case ID_LED_OFF1_DEF2:
3321 		case ID_LED_OFF1_ON2:
3322 		case ID_LED_OFF1_OFF2:
3323 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3324 			mac->ledctl_mode1 |= (ledctl_off << shift);
3325 			break;
3326 		default:
3327 			/* Do nothing */
3328 			break;
3329 		}
3330 		switch (temp) {
3331 		case ID_LED_DEF1_ON2:
3332 		case ID_LED_ON1_ON2:
3333 		case ID_LED_OFF1_ON2:
3334 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3335 			mac->ledctl_mode2 |= (ledctl_on << shift);
3336 			break;
3337 		case ID_LED_DEF1_OFF2:
3338 		case ID_LED_ON1_OFF2:
3339 		case ID_LED_OFF1_OFF2:
3340 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3341 			mac->ledctl_mode2 |= (ledctl_off << shift);
3342 			break;
3343 		default:
3344 			/* Do nothing */
3345 			break;
3346 		}
3347 	}
3348 
3349 	return 0;
3350 }
3351 
3352 /**
3353  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3354  *  @hw: pointer to the HW structure
3355  *
3356  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3357  *  register, so the the bus width is hard coded.
3358  **/
3359 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3360 {
3361 	struct e1000_bus_info *bus = &hw->bus;
3362 	s32 ret_val;
3363 
3364 	ret_val = e1000e_get_bus_info_pcie(hw);
3365 
3366 	/* ICH devices are "PCI Express"-ish.  They have
3367 	 * a configuration space, but do not contain
3368 	 * PCI Express Capability registers, so bus width
3369 	 * must be hardcoded.
3370 	 */
3371 	if (bus->width == e1000_bus_width_unknown)
3372 		bus->width = e1000_bus_width_pcie_x1;
3373 
3374 	return ret_val;
3375 }
3376 
3377 /**
3378  *  e1000_reset_hw_ich8lan - Reset the hardware
3379  *  @hw: pointer to the HW structure
3380  *
3381  *  Does a full reset of the hardware which includes a reset of the PHY and
3382  *  MAC.
3383  **/
3384 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3385 {
3386 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3387 	u16 kum_cfg;
3388 	u32 ctrl, reg;
3389 	s32 ret_val;
3390 
3391 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
3392 	 * on the last TLP read/write transaction when MAC is reset.
3393 	 */
3394 	ret_val = e1000e_disable_pcie_master(hw);
3395 	if (ret_val)
3396 		e_dbg("PCI-E Master disable polling has failed.\n");
3397 
3398 	e_dbg("Masking off all interrupts\n");
3399 	ew32(IMC, 0xffffffff);
3400 
3401 	/* Disable the Transmit and Receive units.  Then delay to allow
3402 	 * any pending transactions to complete before we hit the MAC
3403 	 * with the global reset.
3404 	 */
3405 	ew32(RCTL, 0);
3406 	ew32(TCTL, E1000_TCTL_PSP);
3407 	e1e_flush();
3408 
3409 	usleep_range(10000, 20000);
3410 
3411 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3412 	if (hw->mac.type == e1000_ich8lan) {
3413 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3414 		ew32(PBA, E1000_PBA_8K);
3415 		/* Set Packet Buffer Size to 16k. */
3416 		ew32(PBS, E1000_PBS_16K);
3417 	}
3418 
3419 	if (hw->mac.type == e1000_pchlan) {
3420 		/* Save the NVM K1 bit setting */
3421 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3422 		if (ret_val)
3423 			return ret_val;
3424 
3425 		if (kum_cfg & E1000_NVM_K1_ENABLE)
3426 			dev_spec->nvm_k1_enabled = true;
3427 		else
3428 			dev_spec->nvm_k1_enabled = false;
3429 	}
3430 
3431 	ctrl = er32(CTRL);
3432 
3433 	if (!hw->phy.ops.check_reset_block(hw)) {
3434 		/* Full-chip reset requires MAC and PHY reset at the same
3435 		 * time to make sure the interface between MAC and the
3436 		 * external PHY is reset.
3437 		 */
3438 		ctrl |= E1000_CTRL_PHY_RST;
3439 
3440 		/* Gate automatic PHY configuration by hardware on
3441 		 * non-managed 82579
3442 		 */
3443 		if ((hw->mac.type == e1000_pch2lan) &&
3444 		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3445 			e1000_gate_hw_phy_config_ich8lan(hw, true);
3446 	}
3447 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3448 	e_dbg("Issuing a global reset to ich8lan\n");
3449 	ew32(CTRL, (ctrl | E1000_CTRL_RST));
3450 	/* cannot issue a flush here because it hangs the hardware */
3451 	msleep(20);
3452 
3453 	/* Set Phy Config Counter to 50msec */
3454 	if (hw->mac.type == e1000_pch2lan) {
3455 		reg = er32(FEXTNVM3);
3456 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3457 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3458 		ew32(FEXTNVM3, reg);
3459 	}
3460 
3461 	if (!ret_val)
3462 		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
3463 
3464 	if (ctrl & E1000_CTRL_PHY_RST) {
3465 		ret_val = hw->phy.ops.get_cfg_done(hw);
3466 		if (ret_val)
3467 			return ret_val;
3468 
3469 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3470 		if (ret_val)
3471 			return ret_val;
3472 	}
3473 
3474 	/* For PCH, this write will make sure that any noise
3475 	 * will be detected as a CRC error and be dropped rather than show up
3476 	 * as a bad packet to the DMA engine.
3477 	 */
3478 	if (hw->mac.type == e1000_pchlan)
3479 		ew32(CRC_OFFSET, 0x65656565);
3480 
3481 	ew32(IMC, 0xffffffff);
3482 	er32(ICR);
3483 
3484 	reg = er32(KABGTXD);
3485 	reg |= E1000_KABGTXD_BGSQLBIAS;
3486 	ew32(KABGTXD, reg);
3487 
3488 	return 0;
3489 }
3490 
3491 /**
3492  *  e1000_init_hw_ich8lan - Initialize the hardware
3493  *  @hw: pointer to the HW structure
3494  *
3495  *  Prepares the hardware for transmit and receive by doing the following:
3496  *   - initialize hardware bits
3497  *   - initialize LED identification
3498  *   - setup receive address registers
3499  *   - setup flow control
3500  *   - setup transmit descriptors
3501  *   - clear statistics
3502  **/
3503 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3504 {
3505 	struct e1000_mac_info *mac = &hw->mac;
3506 	u32 ctrl_ext, txdctl, snoop;
3507 	s32 ret_val;
3508 	u16 i;
3509 
3510 	e1000_initialize_hw_bits_ich8lan(hw);
3511 
3512 	/* Initialize identification LED */
3513 	ret_val = mac->ops.id_led_init(hw);
3514 	if (ret_val)
3515 		e_dbg("Error initializing identification LED\n");
3516 		/* This is not fatal and we should not stop init due to this */
3517 
3518 	/* Setup the receive address. */
3519 	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
3520 
3521 	/* Zero out the Multicast HASH table */
3522 	e_dbg("Zeroing the MTA\n");
3523 	for (i = 0; i < mac->mta_reg_count; i++)
3524 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3525 
3526 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
3527 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
3528 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3529 	 */
3530 	if (hw->phy.type == e1000_phy_82578) {
3531 		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
3532 		i &= ~BM_WUC_HOST_WU_BIT;
3533 		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
3534 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3535 		if (ret_val)
3536 			return ret_val;
3537 	}
3538 
3539 	/* Setup link and flow control */
3540 	ret_val = mac->ops.setup_link(hw);
3541 
3542 	/* Set the transmit descriptor write-back policy for both queues */
3543 	txdctl = er32(TXDCTL(0));
3544 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3545 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3546 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3547 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3548 	ew32(TXDCTL(0), txdctl);
3549 	txdctl = er32(TXDCTL(1));
3550 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3551 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3552 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3553 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3554 	ew32(TXDCTL(1), txdctl);
3555 
3556 	/* ICH8 has opposite polarity of no_snoop bits.
3557 	 * By default, we should use snoop behavior.
3558 	 */
3559 	if (mac->type == e1000_ich8lan)
3560 		snoop = PCIE_ICH8_SNOOP_ALL;
3561 	else
3562 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3563 	e1000e_set_pcie_no_snoop(hw, snoop);
3564 
3565 	ctrl_ext = er32(CTRL_EXT);
3566 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3567 	ew32(CTRL_EXT, ctrl_ext);
3568 
3569 	/* Clear all of the statistics registers (clear on read).  It is
3570 	 * important that we do this after we have tried to establish link
3571 	 * because the symbol error count will increment wildly if there
3572 	 * is no link.
3573 	 */
3574 	e1000_clear_hw_cntrs_ich8lan(hw);
3575 
3576 	return ret_val;
3577 }
3578 /**
3579  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3580  *  @hw: pointer to the HW structure
3581  *
3582  *  Sets/Clears required hardware bits necessary for correctly setting up the
3583  *  hardware for transmit and receive.
3584  **/
3585 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3586 {
3587 	u32 reg;
3588 
3589 	/* Extended Device Control */
3590 	reg = er32(CTRL_EXT);
3591 	reg |= (1 << 22);
3592 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3593 	if (hw->mac.type >= e1000_pchlan)
3594 		reg |= E1000_CTRL_EXT_PHYPDEN;
3595 	ew32(CTRL_EXT, reg);
3596 
3597 	/* Transmit Descriptor Control 0 */
3598 	reg = er32(TXDCTL(0));
3599 	reg |= (1 << 22);
3600 	ew32(TXDCTL(0), reg);
3601 
3602 	/* Transmit Descriptor Control 1 */
3603 	reg = er32(TXDCTL(1));
3604 	reg |= (1 << 22);
3605 	ew32(TXDCTL(1), reg);
3606 
3607 	/* Transmit Arbitration Control 0 */
3608 	reg = er32(TARC(0));
3609 	if (hw->mac.type == e1000_ich8lan)
3610 		reg |= (1 << 28) | (1 << 29);
3611 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3612 	ew32(TARC(0), reg);
3613 
3614 	/* Transmit Arbitration Control 1 */
3615 	reg = er32(TARC(1));
3616 	if (er32(TCTL) & E1000_TCTL_MULR)
3617 		reg &= ~(1 << 28);
3618 	else
3619 		reg |= (1 << 28);
3620 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
3621 	ew32(TARC(1), reg);
3622 
3623 	/* Device Status */
3624 	if (hw->mac.type == e1000_ich8lan) {
3625 		reg = er32(STATUS);
3626 		reg &= ~(1 << 31);
3627 		ew32(STATUS, reg);
3628 	}
3629 
3630 	/* work-around descriptor data corruption issue during nfs v2 udp
3631 	 * traffic, just disable the nfs filtering capability
3632 	 */
3633 	reg = er32(RFCTL);
3634 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3635 
3636 	/* Disable IPv6 extension header parsing because some malformed
3637 	 * IPv6 headers can hang the Rx.
3638 	 */
3639 	if (hw->mac.type == e1000_ich8lan)
3640 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3641 	ew32(RFCTL, reg);
3642 
3643 	/* Enable ECC on Lynxpoint */
3644 	if (hw->mac.type == e1000_pch_lpt) {
3645 		reg = er32(PBECCSTS);
3646 		reg |= E1000_PBECCSTS_ECC_ENABLE;
3647 		ew32(PBECCSTS, reg);
3648 
3649 		reg = er32(CTRL);
3650 		reg |= E1000_CTRL_MEHE;
3651 		ew32(CTRL, reg);
3652 	}
3653 }
3654 
3655 /**
3656  *  e1000_setup_link_ich8lan - Setup flow control and link settings
3657  *  @hw: pointer to the HW structure
3658  *
3659  *  Determines which flow control settings to use, then configures flow
3660  *  control.  Calls the appropriate media-specific link configuration
3661  *  function.  Assuming the adapter has a valid link partner, a valid link
3662  *  should be established.  Assumes the hardware has previously been reset
3663  *  and the transmitter and receiver are not enabled.
3664  **/
3665 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3666 {
3667 	s32 ret_val;
3668 
3669 	if (hw->phy.ops.check_reset_block(hw))
3670 		return 0;
3671 
3672 	/* ICH parts do not have a word in the NVM to determine
3673 	 * the default flow control setting, so we explicitly
3674 	 * set it to full.
3675 	 */
3676 	if (hw->fc.requested_mode == e1000_fc_default) {
3677 		/* Workaround h/w hang when Tx flow control enabled */
3678 		if (hw->mac.type == e1000_pchlan)
3679 			hw->fc.requested_mode = e1000_fc_rx_pause;
3680 		else
3681 			hw->fc.requested_mode = e1000_fc_full;
3682 	}
3683 
3684 	/* Save off the requested flow control mode for use later.  Depending
3685 	 * on the link partner's capabilities, we may or may not use this mode.
3686 	 */
3687 	hw->fc.current_mode = hw->fc.requested_mode;
3688 
3689 	e_dbg("After fix-ups FlowControl is now = %x\n",
3690 		hw->fc.current_mode);
3691 
3692 	/* Continue to configure the copper link. */
3693 	ret_val = hw->mac.ops.setup_physical_interface(hw);
3694 	if (ret_val)
3695 		return ret_val;
3696 
3697 	ew32(FCTTV, hw->fc.pause_time);
3698 	if ((hw->phy.type == e1000_phy_82578) ||
3699 	    (hw->phy.type == e1000_phy_82579) ||
3700 	    (hw->phy.type == e1000_phy_i217) ||
3701 	    (hw->phy.type == e1000_phy_82577)) {
3702 		ew32(FCRTV_PCH, hw->fc.refresh_time);
3703 
3704 		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
3705 				   hw->fc.pause_time);
3706 		if (ret_val)
3707 			return ret_val;
3708 	}
3709 
3710 	return e1000e_set_fc_watermarks(hw);
3711 }
3712 
3713 /**
3714  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3715  *  @hw: pointer to the HW structure
3716  *
3717  *  Configures the kumeran interface to the PHY to wait the appropriate time
3718  *  when polling the PHY, then call the generic setup_copper_link to finish
3719  *  configuring the copper link.
3720  **/
3721 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3722 {
3723 	u32 ctrl;
3724 	s32 ret_val;
3725 	u16 reg_data;
3726 
3727 	ctrl = er32(CTRL);
3728 	ctrl |= E1000_CTRL_SLU;
3729 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3730 	ew32(CTRL, ctrl);
3731 
3732 	/* Set the mac to wait the maximum time between each iteration
3733 	 * and increase the max iterations when polling the phy;
3734 	 * this fixes erroneous timeouts at 10Mbps.
3735 	 */
3736 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
3737 	if (ret_val)
3738 		return ret_val;
3739 	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3740 	                               &reg_data);
3741 	if (ret_val)
3742 		return ret_val;
3743 	reg_data |= 0x3F;
3744 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3745 	                                reg_data);
3746 	if (ret_val)
3747 		return ret_val;
3748 
3749 	switch (hw->phy.type) {
3750 	case e1000_phy_igp_3:
3751 		ret_val = e1000e_copper_link_setup_igp(hw);
3752 		if (ret_val)
3753 			return ret_val;
3754 		break;
3755 	case e1000_phy_bm:
3756 	case e1000_phy_82578:
3757 		ret_val = e1000e_copper_link_setup_m88(hw);
3758 		if (ret_val)
3759 			return ret_val;
3760 		break;
3761 	case e1000_phy_82577:
3762 	case e1000_phy_82579:
3763 	case e1000_phy_i217:
3764 		ret_val = e1000_copper_link_setup_82577(hw);
3765 		if (ret_val)
3766 			return ret_val;
3767 		break;
3768 	case e1000_phy_ife:
3769 		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
3770 		if (ret_val)
3771 			return ret_val;
3772 
3773 		reg_data &= ~IFE_PMC_AUTO_MDIX;
3774 
3775 		switch (hw->phy.mdix) {
3776 		case 1:
3777 			reg_data &= ~IFE_PMC_FORCE_MDIX;
3778 			break;
3779 		case 2:
3780 			reg_data |= IFE_PMC_FORCE_MDIX;
3781 			break;
3782 		case 0:
3783 		default:
3784 			reg_data |= IFE_PMC_AUTO_MDIX;
3785 			break;
3786 		}
3787 		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
3788 		if (ret_val)
3789 			return ret_val;
3790 		break;
3791 	default:
3792 		break;
3793 	}
3794 
3795 	return e1000e_setup_copper_link(hw);
3796 }
3797 
3798 /**
3799  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3800  *  @hw: pointer to the HW structure
3801  *  @speed: pointer to store current link speed
3802  *  @duplex: pointer to store the current link duplex
3803  *
3804  *  Calls the generic get_speed_and_duplex to retrieve the current link
3805  *  information and then calls the Kumeran lock loss workaround for links at
3806  *  gigabit speeds.
3807  **/
3808 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3809 					  u16 *duplex)
3810 {
3811 	s32 ret_val;
3812 
3813 	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
3814 	if (ret_val)
3815 		return ret_val;
3816 
3817 	if ((hw->mac.type == e1000_ich8lan) &&
3818 	    (hw->phy.type == e1000_phy_igp_3) &&
3819 	    (*speed == SPEED_1000)) {
3820 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3821 	}
3822 
3823 	return ret_val;
3824 }
3825 
3826 /**
3827  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3828  *  @hw: pointer to the HW structure
3829  *
3830  *  Work-around for 82566 Kumeran PCS lock loss:
3831  *  On link status change (i.e. PCI reset, speed change) and link is up and
3832  *  speed is gigabit-
3833  *    0) if workaround is optionally disabled do nothing
3834  *    1) wait 1ms for Kumeran link to come up
3835  *    2) check Kumeran Diagnostic register PCS lock loss bit
3836  *    3) if not set the link is locked (all is good), otherwise...
3837  *    4) reset the PHY
3838  *    5) repeat up to 10 times
3839  *  Note: this is only called for IGP3 copper when speed is 1gb.
3840  **/
3841 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3842 {
3843 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3844 	u32 phy_ctrl;
3845 	s32 ret_val;
3846 	u16 i, data;
3847 	bool link;
3848 
3849 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3850 		return 0;
3851 
3852 	/* Make sure link is up before proceeding.  If not just return.
3853 	 * Attempting this while link is negotiating fouled up link
3854 	 * stability
3855 	 */
3856 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
3857 	if (!link)
3858 		return 0;
3859 
3860 	for (i = 0; i < 10; i++) {
3861 		/* read once to clear */
3862 		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3863 		if (ret_val)
3864 			return ret_val;
3865 		/* and again to get new status */
3866 		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3867 		if (ret_val)
3868 			return ret_val;
3869 
3870 		/* check for PCS lock */
3871 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3872 			return 0;
3873 
3874 		/* Issue PHY reset */
3875 		e1000_phy_hw_reset(hw);
3876 		mdelay(5);
3877 	}
3878 	/* Disable GigE link negotiation */
3879 	phy_ctrl = er32(PHY_CTRL);
3880 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3881 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3882 	ew32(PHY_CTRL, phy_ctrl);
3883 
3884 	/* Call gig speed drop workaround on Gig disable before accessing
3885 	 * any PHY registers
3886 	 */
3887 	e1000e_gig_downshift_workaround_ich8lan(hw);
3888 
3889 	/* unable to acquire PCS lock */
3890 	return -E1000_ERR_PHY;
3891 }
3892 
3893 /**
3894  *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3895  *  @hw: pointer to the HW structure
3896  *  @state: boolean value used to set the current Kumeran workaround state
3897  *
3898  *  If ICH8, set the current Kumeran workaround state (enabled - true
3899  *  /disabled - false).
3900  **/
3901 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3902 						 bool state)
3903 {
3904 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3905 
3906 	if (hw->mac.type != e1000_ich8lan) {
3907 		e_dbg("Workaround applies to ICH8 only.\n");
3908 		return;
3909 	}
3910 
3911 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3912 }
3913 
3914 /**
3915  *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3916  *  @hw: pointer to the HW structure
3917  *
3918  *  Workaround for 82566 power-down on D3 entry:
3919  *    1) disable gigabit link
3920  *    2) write VR power-down enable
3921  *    3) read it back
3922  *  Continue if successful, else issue LCD reset and repeat
3923  **/
3924 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3925 {
3926 	u32 reg;
3927 	u16 data;
3928 	u8  retry = 0;
3929 
3930 	if (hw->phy.type != e1000_phy_igp_3)
3931 		return;
3932 
3933 	/* Try the workaround twice (if needed) */
3934 	do {
3935 		/* Disable link */
3936 		reg = er32(PHY_CTRL);
3937 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3938 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3939 		ew32(PHY_CTRL, reg);
3940 
3941 		/* Call gig speed drop workaround on Gig disable before
3942 		 * accessing any PHY registers
3943 		 */
3944 		if (hw->mac.type == e1000_ich8lan)
3945 			e1000e_gig_downshift_workaround_ich8lan(hw);
3946 
3947 		/* Write VR power-down enable */
3948 		e1e_rphy(hw, IGP3_VR_CTRL, &data);
3949 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3950 		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3951 
3952 		/* Read it back and test */
3953 		e1e_rphy(hw, IGP3_VR_CTRL, &data);
3954 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3955 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3956 			break;
3957 
3958 		/* Issue PHY reset and repeat at most one more time */
3959 		reg = er32(CTRL);
3960 		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
3961 		retry++;
3962 	} while (retry);
3963 }
3964 
3965 /**
3966  *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3967  *  @hw: pointer to the HW structure
3968  *
3969  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3970  *  LPLU, Gig disable, MDIC PHY reset):
3971  *    1) Set Kumeran Near-end loopback
3972  *    2) Clear Kumeran Near-end loopback
3973  *  Should only be called for ICH8[m] devices with any 1G Phy.
3974  **/
3975 void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3976 {
3977 	s32 ret_val;
3978 	u16 reg_data;
3979 
3980 	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
3981 		return;
3982 
3983 	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3984 				      &reg_data);
3985 	if (ret_val)
3986 		return;
3987 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3988 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3989 				       reg_data);
3990 	if (ret_val)
3991 		return;
3992 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3993 	e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
3994 }
3995 
3996 /**
3997  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3998  *  @hw: pointer to the HW structure
3999  *
4000  *  During S0 to Sx transition, it is possible the link remains at gig
4001  *  instead of negotiating to a lower speed.  Before going to Sx, set
4002  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4003  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4004  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4005  *  needs to be written.
4006  *  Parts that support (and are linked to a partner which support) EEE in
4007  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4008  *  than 10Mbps w/o EEE.
4009  **/
4010 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4011 {
4012 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4013 	u32 phy_ctrl;
4014 	s32 ret_val;
4015 
4016 	phy_ctrl = er32(PHY_CTRL);
4017 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4018 
4019 	if (hw->phy.type == e1000_phy_i217) {
4020 		u16 phy_reg, device_id = hw->adapter->pdev->device;
4021 
4022 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4023 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4024 			u32 fextnvm6 = er32(FEXTNVM6);
4025 
4026 			ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4027 		}
4028 
4029 		ret_val = hw->phy.ops.acquire(hw);
4030 		if (ret_val)
4031 			goto out;
4032 
4033 		if (!dev_spec->eee_disable) {
4034 			u16 eee_advert;
4035 
4036 			ret_val =
4037 			    e1000_read_emi_reg_locked(hw,
4038 						      I217_EEE_ADVERTISEMENT,
4039 						      &eee_advert);
4040 			if (ret_val)
4041 				goto release;
4042 
4043 			/* Disable LPLU if both link partners support 100BaseT
4044 			 * EEE and 100Full is advertised on both ends of the
4045 			 * link.
4046 			 */
4047 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4048 			    (dev_spec->eee_lp_ability &
4049 			     I82579_EEE_100_SUPPORTED) &&
4050 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4051 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4052 					      E1000_PHY_CTRL_NOND0A_LPLU);
4053 		}
4054 
4055 		/* For i217 Intel Rapid Start Technology support,
4056 		 * when the system is going into Sx and no manageability engine
4057 		 * is present, the driver must configure proxy to reset only on
4058 		 * power good.  LPI (Low Power Idle) state must also reset only
4059 		 * on power good, as well as the MTA (Multicast table array).
4060 		 * The SMBus release must also be disabled on LCD reset.
4061 		 */
4062 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4063 			/* Enable proxy to reset only on power good. */
4064 			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
4065 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4066 			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
4067 
4068 			/* Set bit enable LPI (EEE) to reset only on
4069 			 * power good.
4070 			 */
4071 			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
4072 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4073 			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
4074 
4075 			/* Disable the SMB release on LCD reset. */
4076 			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4077 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4078 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4079 		}
4080 
4081 		/* Enable MTA to reset for Intel Rapid Start Technology
4082 		 * Support
4083 		 */
4084 		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4085 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4086 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4087 
4088 release:
4089 		hw->phy.ops.release(hw);
4090 	}
4091 out:
4092 	ew32(PHY_CTRL, phy_ctrl);
4093 
4094 	if (hw->mac.type == e1000_ich8lan)
4095 		e1000e_gig_downshift_workaround_ich8lan(hw);
4096 
4097 	if (hw->mac.type >= e1000_pchlan) {
4098 		e1000_oem_bits_config_ich8lan(hw, false);
4099 
4100 		/* Reset PHY to activate OEM bits on 82577/8 */
4101 		if (hw->mac.type == e1000_pchlan)
4102 			e1000e_phy_hw_reset_generic(hw);
4103 
4104 		ret_val = hw->phy.ops.acquire(hw);
4105 		if (ret_val)
4106 			return;
4107 		e1000_write_smbus_addr(hw);
4108 		hw->phy.ops.release(hw);
4109 	}
4110 }
4111 
4112 /**
4113  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4114  *  @hw: pointer to the HW structure
4115  *
4116  *  During Sx to S0 transitions on non-managed devices or managed devices
4117  *  on which PHY resets are not blocked, if the PHY registers cannot be
4118  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
4119  *  the PHY.
4120  *  On i217, setup Intel Rapid Start Technology.
4121  **/
4122 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4123 {
4124 	s32 ret_val;
4125 
4126 	if (hw->mac.type < e1000_pch2lan)
4127 		return;
4128 
4129 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
4130 	if (ret_val) {
4131 		e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
4132 		return;
4133 	}
4134 
4135 	/* For i217 Intel Rapid Start Technology support when the system
4136 	 * is transitioning from Sx and no manageability engine is present
4137 	 * configure SMBus to restore on reset, disable proxy, and enable
4138 	 * the reset on MTA (Multicast table array).
4139 	 */
4140 	if (hw->phy.type == e1000_phy_i217) {
4141 		u16 phy_reg;
4142 
4143 		ret_val = hw->phy.ops.acquire(hw);
4144 		if (ret_val) {
4145 			e_dbg("Failed to setup iRST\n");
4146 			return;
4147 		}
4148 
4149 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4150 			/* Restore clear on SMB if no manageability engine
4151 			 * is present
4152 			 */
4153 			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4154 			if (ret_val)
4155 				goto release;
4156 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4157 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4158 
4159 			/* Disable Proxy */
4160 			e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
4161 		}
4162 		/* Enable reset on MTA */
4163 		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4164 		if (ret_val)
4165 			goto release;
4166 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4167 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4168 release:
4169 		if (ret_val)
4170 			e_dbg("Error %d in resume workarounds\n", ret_val);
4171 		hw->phy.ops.release(hw);
4172 	}
4173 }
4174 
4175 /**
4176  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4177  *  @hw: pointer to the HW structure
4178  *
4179  *  Return the LED back to the default configuration.
4180  **/
4181 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4182 {
4183 	if (hw->phy.type == e1000_phy_ife)
4184 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
4185 
4186 	ew32(LEDCTL, hw->mac.ledctl_default);
4187 	return 0;
4188 }
4189 
4190 /**
4191  *  e1000_led_on_ich8lan - Turn LEDs on
4192  *  @hw: pointer to the HW structure
4193  *
4194  *  Turn on the LEDs.
4195  **/
4196 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4197 {
4198 	if (hw->phy.type == e1000_phy_ife)
4199 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4200 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4201 
4202 	ew32(LEDCTL, hw->mac.ledctl_mode2);
4203 	return 0;
4204 }
4205 
4206 /**
4207  *  e1000_led_off_ich8lan - Turn LEDs off
4208  *  @hw: pointer to the HW structure
4209  *
4210  *  Turn off the LEDs.
4211  **/
4212 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4213 {
4214 	if (hw->phy.type == e1000_phy_ife)
4215 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4216 				(IFE_PSCL_PROBE_MODE |
4217 				 IFE_PSCL_PROBE_LEDS_OFF));
4218 
4219 	ew32(LEDCTL, hw->mac.ledctl_mode1);
4220 	return 0;
4221 }
4222 
4223 /**
4224  *  e1000_setup_led_pchlan - Configures SW controllable LED
4225  *  @hw: pointer to the HW structure
4226  *
4227  *  This prepares the SW controllable LED for use.
4228  **/
4229 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4230 {
4231 	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
4232 }
4233 
4234 /**
4235  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4236  *  @hw: pointer to the HW structure
4237  *
4238  *  Return the LED back to the default configuration.
4239  **/
4240 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4241 {
4242 	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
4243 }
4244 
4245 /**
4246  *  e1000_led_on_pchlan - Turn LEDs on
4247  *  @hw: pointer to the HW structure
4248  *
4249  *  Turn on the LEDs.
4250  **/
4251 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4252 {
4253 	u16 data = (u16)hw->mac.ledctl_mode2;
4254 	u32 i, led;
4255 
4256 	/* If no link, then turn LED on by setting the invert bit
4257 	 * for each LED that's mode is "link_up" in ledctl_mode2.
4258 	 */
4259 	if (!(er32(STATUS) & E1000_STATUS_LU)) {
4260 		for (i = 0; i < 3; i++) {
4261 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4262 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4263 			    E1000_LEDCTL_MODE_LINK_UP)
4264 				continue;
4265 			if (led & E1000_PHY_LED0_IVRT)
4266 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4267 			else
4268 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4269 		}
4270 	}
4271 
4272 	return e1e_wphy(hw, HV_LED_CONFIG, data);
4273 }
4274 
4275 /**
4276  *  e1000_led_off_pchlan - Turn LEDs off
4277  *  @hw: pointer to the HW structure
4278  *
4279  *  Turn off the LEDs.
4280  **/
4281 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4282 {
4283 	u16 data = (u16)hw->mac.ledctl_mode1;
4284 	u32 i, led;
4285 
4286 	/* If no link, then turn LED off by clearing the invert bit
4287 	 * for each LED that's mode is "link_up" in ledctl_mode1.
4288 	 */
4289 	if (!(er32(STATUS) & E1000_STATUS_LU)) {
4290 		for (i = 0; i < 3; i++) {
4291 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4292 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4293 			    E1000_LEDCTL_MODE_LINK_UP)
4294 				continue;
4295 			if (led & E1000_PHY_LED0_IVRT)
4296 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4297 			else
4298 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4299 		}
4300 	}
4301 
4302 	return e1e_wphy(hw, HV_LED_CONFIG, data);
4303 }
4304 
4305 /**
4306  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4307  *  @hw: pointer to the HW structure
4308  *
4309  *  Read appropriate register for the config done bit for completion status
4310  *  and configure the PHY through s/w for EEPROM-less parts.
4311  *
4312  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
4313  *  config done bit, so only an error is logged and continues.  If we were
4314  *  to return with error, EEPROM-less silicon would not be able to be reset
4315  *  or change link.
4316  **/
4317 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4318 {
4319 	s32 ret_val = 0;
4320 	u32 bank = 0;
4321 	u32 status;
4322 
4323 	e1000e_get_cfg_done_generic(hw);
4324 
4325 	/* Wait for indication from h/w that it has completed basic config */
4326 	if (hw->mac.type >= e1000_ich10lan) {
4327 		e1000_lan_init_done_ich8lan(hw);
4328 	} else {
4329 		ret_val = e1000e_get_auto_rd_done(hw);
4330 		if (ret_val) {
4331 			/* When auto config read does not complete, do not
4332 			 * return with an error. This can happen in situations
4333 			 * where there is no eeprom and prevents getting link.
4334 			 */
4335 			e_dbg("Auto Read Done did not complete\n");
4336 			ret_val = 0;
4337 		}
4338 	}
4339 
4340 	/* Clear PHY Reset Asserted bit */
4341 	status = er32(STATUS);
4342 	if (status & E1000_STATUS_PHYRA)
4343 		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
4344 	else
4345 		e_dbg("PHY Reset Asserted not set - needs delay\n");
4346 
4347 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4348 	if (hw->mac.type <= e1000_ich9lan) {
4349 		if (!(er32(EECD) & E1000_EECD_PRES) &&
4350 		    (hw->phy.type == e1000_phy_igp_3)) {
4351 			e1000e_phy_init_script_igp3(hw);
4352 		}
4353 	} else {
4354 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4355 			/* Maybe we should do a basic PHY config */
4356 			e_dbg("EEPROM not present\n");
4357 			ret_val = -E1000_ERR_CONFIG;
4358 		}
4359 	}
4360 
4361 	return ret_val;
4362 }
4363 
4364 /**
4365  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4366  * @hw: pointer to the HW structure
4367  *
4368  * In the case of a PHY power down to save power, or to turn off link during a
4369  * driver unload, or wake on lan is not enabled, remove the link.
4370  **/
4371 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4372 {
4373 	/* If the management interface is not enabled, then power down */
4374 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4375 	      hw->phy.ops.check_reset_block(hw)))
4376 		e1000_power_down_phy_copper(hw);
4377 }
4378 
4379 /**
4380  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4381  *  @hw: pointer to the HW structure
4382  *
4383  *  Clears hardware counters specific to the silicon family and calls
4384  *  clear_hw_cntrs_generic to clear all general purpose counters.
4385  **/
4386 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4387 {
4388 	u16 phy_data;
4389 	s32 ret_val;
4390 
4391 	e1000e_clear_hw_cntrs_base(hw);
4392 
4393 	er32(ALGNERRC);
4394 	er32(RXERRC);
4395 	er32(TNCRS);
4396 	er32(CEXTERR);
4397 	er32(TSCTC);
4398 	er32(TSCTFC);
4399 
4400 	er32(MGTPRC);
4401 	er32(MGTPDC);
4402 	er32(MGTPTC);
4403 
4404 	er32(IAC);
4405 	er32(ICRXOC);
4406 
4407 	/* Clear PHY statistics registers */
4408 	if ((hw->phy.type == e1000_phy_82578) ||
4409 	    (hw->phy.type == e1000_phy_82579) ||
4410 	    (hw->phy.type == e1000_phy_i217) ||
4411 	    (hw->phy.type == e1000_phy_82577)) {
4412 		ret_val = hw->phy.ops.acquire(hw);
4413 		if (ret_val)
4414 			return;
4415 		ret_val = hw->phy.ops.set_page(hw,
4416 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
4417 		if (ret_val)
4418 			goto release;
4419 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4420 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4421 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4422 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4423 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4424 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4425 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4426 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4427 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4428 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4429 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4430 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4431 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4432 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4433 release:
4434 		hw->phy.ops.release(hw);
4435 	}
4436 }
4437 
/* MAC operation vectors shared by all ICH/PCH family entries below;
 * entries marked "dependent on mac type" are filled in elsewhere per part.
 */
static const struct e1000_mac_operations ich8_mac_ops = {
	/* check_mng_mode dependent on mac type */
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.set_lan_id		= e1000_set_lan_id_single_port,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
	/* id_led_init dependent on mac type */
	.config_collision_dist	= e1000e_config_collision_dist_generic,
	.rar_set		= e1000e_rar_set_generic,
};
4457 
/* PHY operation vectors shared by the ICH/PCH family entries below. */
static const struct e1000_phy_operations ich8_phy_ops = {
	.acquire		= e1000_acquire_swflag_ich8lan,
	.check_reset_block	= e1000_check_reset_block_ich8lan,
	.commit			= NULL,
	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_release_swflag_ich8lan,
	.reset			= e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
	.write_reg		= e1000e_write_phy_reg_igp,
};
4471 
/* NVM (flash) operation vectors shared by the ICH/PCH family entries below. */
static const struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.read			= e1000_read_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.reload			= e1000e_reload_nvm_generic,
	.update			= e1000_update_nvm_checksum_ich8lan,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};
4482 
/* Device description for ICH8-based parts. */
const struct e1000_info e1000_ich8_info = {
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
				  | FLAG_IS_ICH
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,
	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4498 
/* Device description for ICH9-based parts (adds jumbo frame support). */
const struct e1000_info e1000_ich9_info = {
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4515 
/* Device description for ICH10-based parts. */
const struct e1000_info e1000_ich10_info = {
	.mac			= e1000_ich10lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4532 
/* Device description for PCH (82577/82578) parts. */
const struct e1000_info e1000_pch_info = {
	.mac			= e1000_pchlan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS,
	.pba			= 26,
	.max_hw_frame_size	= 4096,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4551 
/* Device description for PCH2 parts (adds HW timestamping and EEE). */
const struct e1000_info e1000_pch2_info = {
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9018,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4571 
/* Device description for PCH LPT (Lynx Point) parts. */
const struct e1000_info e1000_pch_lpt_info = {
	.mac			= e1000_pch_lpt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9018,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4591