// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000.h"

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1;		/* bit 0 Flash Cycle Done */
		u16 flcerr:1;		/* bit 1 Flash Cycle Error */
		u16 dael:1;		/* bit 2 Direct Access error Log */
		u16 berasesz:2;		/* bit 4:3 Sector Erase Size */
		u16 flcinprog:1;	/* bit 5 flash cycle in Progress */
		u16 reserved1:2;	/* bit 7:6 Reserved */
		u16 reserved2:6;	/* bit 13:8 Reserved */
		u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
		u16 flockdn:1;		/* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;		/* 0 Flash Cycle Go */
		u16 flcycle:2;		/* 2:1 Flash Cycle */
		u16 reserved:5;		/* 7:3 Reserved */
		u16 fldbcount:2;	/* 9:8 Flash Data Byte Count */
		u16 flockdn:6;		/* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8;		/* 7:0 GbE region Read Access */
		u32 grwa:8;		/* 15:8 GbE region Write Access */
		u32 gmrag:8;		/* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8;		/* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u32 regval;
};

/* ICH Flash Protected Region */
union ich8_flash_protected_range {
	struct ich8_pr {
		u32 base:13;		/* 0:12 Protected Range Base */
		u32 reserved1:2;	/* 13:14 Reserved */
		u32 rpe:1;		/* 15 Read Protection Enable */
		u32 limit:13;		/* 16:28 Protected Range Limit */
		u32 reserved2:2;	/* 29:30 Reserved */
		u32 wpe:1;		/* 31 Write Protection Enable */
	} range;
	u32 regval;
};

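/* Worked example (editor's sketch, not upstream code): decoding a
 * protected-range register through the union above.  Assuming the usual
 * 4 KB granularity of the SPI flash protected-range registers, a regval
 * of 0x80020001 decodes as base = 0x0001, limit = 0x0002, wpe = 1, i.e.
 * writes to flash bytes 0x1000 through 0x2FFF are blocked:
 *
 *	union ich8_flash_protected_range pr;
 *
 *	pr.regval = 0x80020001;
 *	start = pr.range.base << FLASH_SECTOR_ADDR_SHIFT;	// 0x1000
 *	end = ((pr.range.limit + 1) << FLASH_SECTOR_ADDR_SHIFT) - 1;
 */
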
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data);
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data);
static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					   u32 *data);
static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
					  u32 offset, u32 *data);
static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
					    u32 offset, u32 data);
static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
						 u32 offset, u32 dword);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);

static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
	return readw(hw->flash_address + reg);
}

static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
{
	return readl(hw->flash_address + reg);
}

static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
{
	writew(val, hw->flash_address + reg);
}

static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	writel(val, hw->flash_address + reg);
}

#define er16flash(reg)		__er16flash(hw, (reg))
#define er32flash(reg)		__er32flash(hw, (reg))
#define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
#define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))

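/* Illustrative sketch (editor's addition, not part of the upstream driver):
 * how the bitfield unions and the flash accessor macros above combine.  The
 * macros expect a local variable named 'hw' in scope; the union overlays the
 * raw 16-bit HSFSTS value with named fields so callers can test cycle state
 * without open-coded masks.  The helper name is hypothetical.
 */
static inline bool e1000_example_flash_cycle_idle(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Idle: no cycle in progress and the previous cycle completed */
	return !hsfsts.hsf_status.flcinprog && hsfsts.hsf_status.flcdone;
}
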
/**
 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 * @hw: pointer to the HW structure
 *
 * Test access to the PHY registers by reading the PHY ID registers.  If
 * the PHY ID is already known (e.g. resume path) compare it with known ID,
 * otherwise assume the read PHY ID is correct if it is valid.
 *
 * Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000e_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return false;
out:
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Only unforce SMBus if ME is not active */
		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = er32(CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			ew32(CTRL_EXT, mac_reg);
		}
	}

	return true;
}

/**
 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 * @hw: pointer to the HW structure
 *
 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 * used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	/* Set Phy Config Counter to 50msec */
	mac_reg = er32(FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	ew32(FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = er32(CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	ew32(CTRL, mac_reg);
	e1e_flush();
	usleep_range(10, 20);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	ew32(CTRL, mac_reg);
	e1e_flush();

	if (hw->mac.type < e1000_pch_lpt) {
		msleep(50);
	} else {
		u16 count = 20;

		do {
			usleep_range(5000, 6000);
		} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);

		msleep(30);
	}
}

/**
 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 * @hw: pointer to the HW structure
 *
 * Workarounds/flow necessary for PHY initialization during driver load
 * and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->adapter;
	u32 mac_reg, fwsm = er32(FWSM);
	s32 ret_val;

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	ret_val = e1000_disable_ulp_lpt_lp(hw, true);
	if (ret_val) {
		e_warn("Failed to disable ULP\n");
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		e_dbg("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = er32(CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msleep(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			e_dbg("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = er32(CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			ew32(CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			e_err("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000e_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			e_err("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		usleep_range(10000, 11000);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}

/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_pch_tgp:
		case e1000_pch_adp:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000e_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
		    e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000e_phy_sw_reset;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000e_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
		ret_val = e1000e_determine_phy_address(hw);
		if (ret_val) {
			e_dbg("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		usleep_range(1000, 1100);
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000e_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
		phy->ops.commit = e1000e_phy_sw_reset;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return 0;
}

/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function
 * pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type >= e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			e_dbg("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = er32flash(ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
		    << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	return 0;
}

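/* Worked example (editor's note, not upstream code): the bank sizing above.
 * On SPT, assuming STRAP reads 0x00000006, ((6 >> 1) & 0x1F) + 1 = 4, so
 * the NVM is 4 * NVM_SIZE_MULTIPLIER bytes (16384, assuming the multiplier
 * is 4096); one bank is then 8192 bytes = 4096 words.  On older parts, a
 * GFPREG of 0x000A0001 gives sector_base_addr = 1 and sector_end_addr =
 * 0xA + 1, so the region spans (0xB - 0x1) << 12 = 40960 bytes, i.e. a
 * 20480-byte (10240-word) bank.
 */
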
/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific MAC parameters and function
 * pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000e_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000e_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000e_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if (mac->type >= e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
		    e1000_setup_copper_link_pch_lpt;
		mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	return 0;
}

/**
 * __e1000_access_emi_reg_locked - Read/write EMI register
 * @hw: pointer to the HW structure
 * @address: EMI address to program
 * @data: pointer to value to read/write from/to the EMI address
 * @read: boolean flag to indicate read or write
 *
 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
	else
		ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);

	return ret_val;
}

/**
 * e1000_read_emi_reg_locked - Read Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be read from the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	return __e1000_access_emi_reg_locked(hw, addr, data, true);
}

/**
 * e1000_write_emi_reg_locked - Write Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be written to the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}

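/* Usage sketch (editor's addition): EMI registers are reached indirectly,
 * by writing the target address to I82579_EMI_ADDR and then reading or
 * writing I82579_EMI_DATA, so every access must sit inside the SW/FW/HW
 * semaphore.  For example, fetching the link partner's EEE ability:
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_LP_ABILITY, &data);
 *	hw->phy.ops.release(hw);
 *
 * e1000_set_eee_pchlan() below follows exactly this pattern.
 */
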
/**
 * e1000_set_eee_pchlan - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
 * the link and the EEE capabilities of the link partner.  The LPI Control
 * register bits will remain set only if/when link is up.
 *
 * EEE LPI must not be asserted earlier than one second after link is up.
 * On 82579, EEE LPI should not be enabled until such time otherwise there
 * can be link issues with some switches.  Other devices can have EEE LPI
 * enabled immediately upon link up since they have a timer in hardware which
 * prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return 0;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			e1e_rphy_locked(hw, MII_LPA, &data);
			if (data & LPA_100FULL)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

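/* Worked example (editor's note): the AND of local advertisement and link
 * partner ability above mirrors the IEEE EEE resolution.  Assuming
 * adv = 0x0006 (100TX and 1000T advertised) and eee_lp_ability = 0x0004
 * (partner supports EEE at 1000T only), only the 1000 enable bit is set
 * in LPI control; the 100 enable bit stays clear because
 * 0x0006 & 0x0004 & I82579_EEE_100_SUPPORTED (assumed bit 1) is zero.
 */
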
/**
 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests.  Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 * speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = er32(FEXTNVM6);
	u32 status = er32(STATUS);
	s32 ret_val = 0;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
						&reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg &
						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usleep_range(10, 20);

		ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		if ((hw->phy.revision > 5) || !link ||
		    ((status & E1000_STATUS_SPEED_100) &&
		     (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			    I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		ew32(FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

/**
 * e1000_platform_pm_pch_lpt - Set platform power management values
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 * when link is up (which must not exceed the maximum latency supported
 * by the platform), otherwise specify there is no LTR requirement.
 * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
 * latencies in the LTR Extended Capability Structure in the PCIe Extended
 * Capability register set, on this device LTR is set by writing the
 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 * message to the PMC.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
	    link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		u64 value;
		u32 rxa;

		if (!hw->adapter->max_frame_size) {
			e_dbg("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			e_dbg("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = er32(PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		rxa *= 512;
		value = (rxa > hw->adapter->max_frame_size) ?
		    (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
		    0;

		while (value > PCI_LTR_VALUE_MASK) {
			scale++;
			value = DIV_ROUND_UP(value, BIT(5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			e_dbg("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);

		/* Determine the maximum latency tolerated by the platform */
		pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
				     &max_snoop);
		pci_read_config_word(hw->adapter->pdev,
				     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	ew32(LTRV, reg);

	return 0;
}

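/* Worked example (editor's note): the encoding loop above.  Assuming a
 * 24 KB Rx buffer allocation (rxa = 24, so 24 * 512 = 12288 bytes), a
 * 1522-byte max frame and a 100 Mbps link:
 *
 *	value = (12288 - 1522) * (16000 / 100) = 1722560 ns
 *
 * 1722560 exceeds PCI_LTR_VALUE_MASK (1023), so the loop divides by 2^5
 * (rounding up) three times: 1722560 -> 53830 -> 1683 -> 53, scale = 3.
 * lat_enc = (3 << PCI_LTR_SCALE_SHIFT) | 53, which the platform decodes
 * as 53 * 2^(5*3) ns = 1736704 ns, about 1.74 ms of tolerated latency.
 */
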
/**
 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @to_sx: boolean indicating a system power state transition to Sx
 *
 * When link is down, configure ULP mode to significantly reduce the power
 * to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 * ME firmware to start the ULP configuration.  If not on an ME enabled
 * system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = 0;
	u16 phy_reg;
	u16 oem_reg = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = er32(H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		ew32(H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (er32(STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msleep(50);
		}
		e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
		      (er32(FEXT) &
		       E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = er32(CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	ew32(CTRL_EXT, mac_reg);

	/* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
	 * LPLU and disable Gig speed when entering ULP
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
						       &oem_reg);
		if (ret_val)
			goto release;

		phy_reg = oem_reg;
		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							phy_reg);

		if (ret_val)
			goto release;
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		if (er32(WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
		else
			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = er32(FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	ew32(FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
	    to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							oem_reg);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		e_dbg("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @force: boolean indicating whether or not to force disabling ULP
 *
 * Un-configure ULP mode when link is up, the system is transitioned from
 * Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 * system, poll for an indication from ME that ULP has been un-configured.
 * If not on an ME enabled system, un-configure the ULP mode by software.
 *
 * During nominal operation, this function is called when link is acquired
 * to disable ULP mode (force=false); otherwise, for example when unloading
 * the driver or during Sx->S0 transitions, this is called with force=true
 * to forcibly disable ULP.
 */
static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = er32(H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			ew32(H2ME, mac_reg);
		}

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
		while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 30) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			usleep_range(10000, 11000);
		}
		e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = er32(H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			ew32(H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = er32(H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			ew32(H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = er32(CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);

		msleep(50);

		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = er32(CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	ew32(CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = er32(FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	ew32(FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		e1000_phy_hw_reset(hw);
		msleep(50);
	}
out:
	if (ret_val)
		e_dbg("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}

/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val, tipg_reg = 0;
	u16 emi_addr, emi_val = 0;
	bool link;
	u16 phy_reg;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;
	mac->get_link_status = false;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			goto out;
	}

	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions.  To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if ((hw->mac.type >= e1000_pch2lan) && link) {
		u16 speed, duplex;

		e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
		tipg_reg = er32(TIPG);
		tipg_reg &= ~E1000_TIPG_IPGT_MASK;

		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
			tipg_reg |= 0xFF;
			/* Reduce Rx latency in analog PHY */
			emi_val = 0;
		} else if (hw->mac.type >= e1000_pch_spt &&
			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
			tipg_reg |= 0xC;
			emi_val = 1;
		} else {
			/* Roll back the default values */
			tipg_reg |= 0x08;
			emi_val = 1;
		}

		ew32(TIPG, tipg_reg);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (hw->mac.type == e1000_pch2lan)
			emi_addr = I82579_RX_CONFIG;
		else
			emi_addr = I217_RX_CONFIG;
		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);

		if (hw->mac.type >= e1000_pch_lpt) {
			u16 phy_reg;

			e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
			if (speed == SPEED_100 || speed == SPEED_10)
				phy_reg |= 0x3E8;
			else
				phy_reg |= 0xFA;
			e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);

			if (speed == SPEED_1000) {
				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
							    &phy_reg);

				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;

				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
							     phy_reg);
			}
		}
		hw->phy.ops.release(hw);

		if (ret_val)
			goto out;

		if (hw->mac.type >= e1000_pch_spt) {
			u16 data;
			u16 ptr_gap;

			if (speed == SPEED_1000) {
				ret_val = hw->phy.ops.acquire(hw);
				if (ret_val)
					goto out;

				ret_val = e1e_rphy_locked(hw,
							  PHY_REG(776, 20),
							  &data);
				if (ret_val) {
					hw->phy.ops.release(hw);
					goto out;
				}

				ptr_gap = (data & (0x3FF << 2)) >> 2;
				if (ptr_gap < 0x18) {
					data &= ~(0x3FF << 2);
					data |= (0x18 << 2);
					ret_val =
					    e1e_wphy_locked(hw,
							    PHY_REG(776, 20),
							    data);
				}
				hw->phy.ops.release(hw);
				if (ret_val)
					goto out;
			} else {
				ret_val = hw->phy.ops.acquire(hw);
				if (ret_val)
					goto out;

				ret_val = e1e_wphy_locked(hw,
							  PHY_REG(776, 20),
							  0xC023);
				hw->phy.ops.release(hw);
				if (ret_val)
					goto out;
			}
		}
	}

	/* I217 Packet Loss issue:
	 * ensure that FEXTNVM4 Beacon Duration is set correctly
	 * on power up.
	 * Set the Beacon Duration for I217 to 8 usec
	 */
	if (hw->mac.type >= e1000_pch_lpt) {
		u32 mac_reg;

		mac_reg = er32(FEXTNVM4);
		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
		ew32(FEXTNVM4, mac_reg);
	}

	/* Work-around I218 hang issue */
	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			goto out;
	}
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Set platform power management values for
		 * Latency Tolerance Reporting (LTR)
		 */
		ret_val = e1000_platform_pm_pch_lpt(hw, link);
		if (ret_val)
			goto out;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	if (hw->mac.type >= e1000_pch_lpt) {
		u32 fextnvm6 = er32(FEXTNVM6);

		if (hw->mac.type == e1000_pch_spt) {
			/* FEXTNVM6 K1-off workaround - for SPT only */
			u32 pcieanacfg = er32(PCIEANACFG);

			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
			else
				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
		}

		ew32(FEXTNVM6, fextnvm6);
	}

	if (!link)
		goto out;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-through */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
			phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;

out:
	mac->get_link_status = true;
	return ret_val;
}

static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 rc;

	rc = e1000_init_mac_params_ich8lan(hw);
	if (rc)
		return rc;

	rc = e1000_init_nvm_params_ich8lan(hw);
	if (rc)
		return rc;

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		rc = e1000_init_phy_params_ich8lan(hw);
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
		rc = e1000_init_phy_params_pchlan(hw);
		break;
	default:
		break;
	}
	if (rc)
		return rc;

	/* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
	 */
	if ((adapter->hw.phy.type == e1000_phy_ife) ||
	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
		adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;

		hw->mac.ops.blink_led = NULL;
	}

	if ((adapter->hw.mac.type == e1000_ich8lan) &&
	    (adapter->hw.phy.type != e1000_phy_ife))
		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;

	/* Enable workaround for 82579 w/ ME enabled */
	if ((adapter->hw.mac.type == e1000_pch2lan) &&
	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;

	return 0;
}

static DEFINE_MUTEX(nvm_mutex);

/**
 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 * @hw: pointer to the HW structure
 *
 * Acquires the mutex for performing NVM operations.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_lock(&nvm_mutex);

	return 0;
}

/**
 * e1000_release_nvm_ich8lan - Release NVM mutex
 * @hw: pointer to the HW structure
 *
 * Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_unlock(&nvm_mutex);
}

/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing PHY and select
 * MAC CSR accesses.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = 0;

	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
			     &hw->adapter->state)) {
		e_dbg("contention for Phy access\n");
		return -E1000_ERR_PHY;
	}

	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	timeout = SW_FLAG_TIMEOUT;

	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	ew32(EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
		      er32(FWSM), extcnf_ctrl);
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	if (ret_val)
		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);

	return ret_val;
}

/**
 * e1000_release_swflag_ich8lan - Release software control flag
 * @hw: pointer to the HW structure
 *
 * Releases the software control flag for performing PHY and select
 * MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	extcnf_ctrl = er32(EXTCNF_CTRL);

	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);
	} else {
		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
	}

	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
}

/**
 * e1000_check_mng_mode_ich8lan - Checks management mode
 * @hw: pointer to the HW structure
 *
 * This checks if the adapter has any manageability enabled.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	fwsm = er32(FWSM);
	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	    ((fwsm & E1000_FWSM_MODE_MASK) ==
	     (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 * e1000_check_mng_mode_pchlan - Checks management mode
 * @hw: pointer to the HW structure
 *
 * This checks if the adapter has iAMT enabled.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
{
	u32 fwsm;

	fwsm = er32(FWSM);
	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	    (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 * e1000_rar_set_pch2lan - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.  For 82579, RAR[0] is the base address register that is to
 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 * Use SHRA[0-3] in place of those reserved for ME.
 **/
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		ew32(RAL(index), rar_low);
		e1e_flush();
		ew32(RAH(index), rar_high);
		e1e_flush();
		return 0;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32)(hw->mac.rar_entry_count)) {
		s32 ret_val;

		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		ew32(SHRAL(index - 1), rar_low);
		e1e_flush();
		ew32(SHRAH(index - 1), rar_high);
		e1e_flush();

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((er32(SHRAL(index - 1)) == rar_low) &&
		    (er32(SHRAH(index - 1)) == rar_high))
			return 0;

		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
		      (index - 1), er32(FWSM));
	}

out:
	e_dbg("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}

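/* Worked example (editor's note): the little-endian packing used by the
 * rar_set functions.  For the (hypothetical) MAC address 00:11:22:33:44:55,
 * addr[0..5] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, so
 * rar_low = 0x33221100 and rar_high = 0x00005544, and the AV (address
 * valid) bit is then OR'd into rar_high before the RAH write.
 */
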
For 82579, RAR[0] is the base address register that is to 1840 * contain the MAC address but RAR[1-6] are reserved for manageability (ME). 1841 * Use SHRA[0-3] in place of those reserved for ME. 1842 **/ 1843 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) 1844 { 1845 u32 rar_low, rar_high; 1846 1847 /* HW expects these in little endian so we reverse the byte order 1848 * from network order (big endian) to little endian 1849 */ 1850 rar_low = ((u32)addr[0] | 1851 ((u32)addr[1] << 8) | 1852 ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); 1853 1854 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); 1855 1856 /* If MAC address zero, no need to set the AV bit */ 1857 if (rar_low || rar_high) 1858 rar_high |= E1000_RAH_AV; 1859 1860 if (index == 0) { 1861 ew32(RAL(index), rar_low); 1862 e1e_flush(); 1863 ew32(RAH(index), rar_high); 1864 e1e_flush(); 1865 return 0; 1866 } 1867 1868 /* RAR[1-6] are owned by manageability. Skip those and program the 1869 * next address into the SHRA register array. 1870 */ 1871 if (index < (u32)(hw->mac.rar_entry_count)) { 1872 s32 ret_val; 1873 1874 ret_val = e1000_acquire_swflag_ich8lan(hw); 1875 if (ret_val) 1876 goto out; 1877 1878 ew32(SHRAL(index - 1), rar_low); 1879 e1e_flush(); 1880 ew32(SHRAH(index - 1), rar_high); 1881 e1e_flush(); 1882 1883 e1000_release_swflag_ich8lan(hw); 1884 1885 /* verify the register updates */ 1886 if ((er32(SHRAL(index - 1)) == rar_low) && 1887 (er32(SHRAH(index - 1)) == rar_high)) 1888 return 0; 1889 1890 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", 1891 (index - 1), er32(FWSM)); 1892 } 1893 1894 out: 1895 e_dbg("Failed to write receive address at index %d\n", index); 1896 return -E1000_ERR_CONFIG; 1897 } 1898 1899 /** 1900 * e1000_rar_get_count_pch_lpt - Get the number of available SHRA 1901 * @hw: pointer to the HW structure 1902 * 1903 * Get the number of available receive registers that the Host can 1904 * program. SHRA[0-10] are the shared receive address registers 1905 * that are shared between the Host and manageability engine (ME). 1906 * ME can reserve any number of addresses and the host needs to be 1907 * able to tell how many available registers it has access to. 1908 **/ 1909 static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw) 1910 { 1911 u32 wlock_mac; 1912 u32 num_entries; 1913 1914 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; 1915 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; 1916 1917 switch (wlock_mac) { 1918 case 0: 1919 /* All SHRA[0..10] and RAR[0] available */ 1920 num_entries = hw->mac.rar_entry_count; 1921 break; 1922 case 1: 1923 /* Only RAR[0] available */ 1924 num_entries = 1; 1925 break; 1926 default: 1927 /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */ 1928 num_entries = wlock_mac + 1; 1929 break; 1930 } 1931 1932 return num_entries; 1933 } 1934 1935 /** 1936 * e1000_rar_set_pch_lpt - Set receive address registers 1937 * @hw: pointer to the HW structure 1938 * @addr: pointer to the receive address 1939 * @index: receive address array register 1940 * 1941 * Sets the receive address register array at index to the address passed 1942 * in by addr. For LPT, RAR[0] is the base address register that is to 1943 * contain the MAC address. SHRA[0-10] are the shared receive address 1944 * registers that are shared between the Host and manageability engine (ME). 
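 *
 * Illustrative index mapping (a sketch of the scheme implemented below,
 * not caller code): index 0 programs RAL/RAH(0), while an index N with
 * 1 <= N < rar_entry_count programs SHRAL/SHRAH_PCH_LPT(N - 1), provided
 * ME has not locked the SHRA registers:
 *
 *	e1000_rar_set_pch_lpt(hw, addr, 0);	writes RAR[0]
 *	e1000_rar_set_pch_lpt(hw, addr, 3);	writes SHRA[2], if unlocked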
1945 **/ 1946 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) 1947 { 1948 u32 rar_low, rar_high; 1949 u32 wlock_mac; 1950 1951 /* HW expects these in little endian so we reverse the byte order 1952 * from network order (big endian) to little endian 1953 */ 1954 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | 1955 ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); 1956 1957 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); 1958 1959 /* If MAC address zero, no need to set the AV bit */ 1960 if (rar_low || rar_high) 1961 rar_high |= E1000_RAH_AV; 1962 1963 if (index == 0) { 1964 ew32(RAL(index), rar_low); 1965 e1e_flush(); 1966 ew32(RAH(index), rar_high); 1967 e1e_flush(); 1968 return 0; 1969 } 1970 1971 /* The manageability engine (ME) can lock certain SHRAR registers that 1972 * it is using - those registers are unavailable for use. 1973 */ 1974 if (index < hw->mac.rar_entry_count) { 1975 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; 1976 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; 1977 1978 /* Check if all SHRAR registers are locked */ 1979 if (wlock_mac == 1) 1980 goto out; 1981 1982 if ((wlock_mac == 0) || (index <= wlock_mac)) { 1983 s32 ret_val; 1984 1985 ret_val = e1000_acquire_swflag_ich8lan(hw); 1986 1987 if (ret_val) 1988 goto out; 1989 1990 ew32(SHRAL_PCH_LPT(index - 1), rar_low); 1991 e1e_flush(); 1992 ew32(SHRAH_PCH_LPT(index - 1), rar_high); 1993 e1e_flush(); 1994 1995 e1000_release_swflag_ich8lan(hw); 1996 1997 /* verify the register updates */ 1998 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && 1999 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) 2000 return 0; 2001 } 2002 } 2003 2004 out: 2005 e_dbg("Failed to write receive address at index %d\n", index); 2006 return -E1000_ERR_CONFIG; 2007 } 2008 2009 /** 2010 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 2011 * @hw: pointer to the HW structure 2012 * 2013 * Checks if firmware is blocking the reset of the PHY. 2014 * This is a function pointer entry point only called by 2015 * reset routines. 2016 **/ 2017 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) 2018 { 2019 bool blocked = false; 2020 int i = 0; 2021 2022 while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && 2023 (i++ < 30)) 2024 usleep_range(10000, 11000); 2025 return blocked ? E1000_BLK_PHY_RESET : 0; 2026 } 2027 2028 /** 2029 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states 2030 * @hw: pointer to the HW structure 2031 * 2032 * Assumes semaphore already acquired. 
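 *
 * Illustrative call pattern (a sketch of how e1000_sw_lcd_config_ich8lan()
 * below uses it, with the PHY semaphore held):
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_write_smbus_addr(hw);
 *	hw->phy.ops.release(hw);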
2033 * 2034 **/ 2035 static s32 e1000_write_smbus_addr(struct e1000_hw *hw) 2036 { 2037 u16 phy_data; 2038 u32 strap = er32(STRAP); 2039 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> 2040 E1000_STRAP_SMT_FREQ_SHIFT; 2041 s32 ret_val; 2042 2043 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 2044 2045 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); 2046 if (ret_val) 2047 return ret_val; 2048 2049 phy_data &= ~HV_SMB_ADDR_MASK; 2050 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 2051 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 2052 2053 if (hw->phy.type == e1000_phy_i217) { 2054 /* Restore SMBus frequency */ 2055 if (freq--) { 2056 phy_data &= ~HV_SMB_ADDR_FREQ_MASK; 2057 phy_data |= (freq & BIT(0)) << 2058 HV_SMB_ADDR_FREQ_LOW_SHIFT; 2059 phy_data |= (freq & BIT(1)) << 2060 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); 2061 } else { 2062 e_dbg("Unsupported SMB frequency in PHY\n"); 2063 } 2064 } 2065 2066 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 2067 } 2068 2069 /** 2070 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 2071 * @hw: pointer to the HW structure 2072 * 2073 * SW should configure the LCD from the NVM extended configuration region 2074 * as a workaround for certain parts. 2075 **/ 2076 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 2077 { 2078 struct e1000_phy_info *phy = &hw->phy; 2079 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 2080 s32 ret_val = 0; 2081 u16 word_addr, reg_data, reg_addr, phy_page = 0; 2082 2083 /* Initialize the PHY from the NVM on ICH platforms. This 2084 * is needed due to an issue where the NVM configuration is 2085 * not properly autoloaded after power transitions. 2086 * Therefore, after each PHY reset, we will load the 2087 * configuration data out of the NVM manually. 2088 */ 2089 switch (hw->mac.type) { 2090 case e1000_ich8lan: 2091 if (phy->type != e1000_phy_igp_3) 2092 return ret_val; 2093 2094 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || 2095 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { 2096 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 2097 break; 2098 } 2099 /* Fall-thru */ 2100 case e1000_pchlan: 2101 case e1000_pch2lan: 2102 case e1000_pch_lpt: 2103 case e1000_pch_spt: 2104 case e1000_pch_cnp: 2105 case e1000_pch_tgp: 2106 case e1000_pch_adp: 2107 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 2108 break; 2109 default: 2110 return ret_val; 2111 } 2112 2113 ret_val = hw->phy.ops.acquire(hw); 2114 if (ret_val) 2115 return ret_val; 2116 2117 data = er32(FEXTNVM); 2118 if (!(data & sw_cfg_mask)) 2119 goto release; 2120 2121 /* Make sure HW does not configure LCD from PHY 2122 * extended configuration before SW configuration 2123 */ 2124 data = er32(EXTCNF_CTRL); 2125 if ((hw->mac.type < e1000_pch2lan) && 2126 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) 2127 goto release; 2128 2129 cnf_size = er32(EXTCNF_SIZE); 2130 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 2131 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; 2132 if (!cnf_size) 2133 goto release; 2134 2135 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 2136 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 2137 2138 if (((hw->mac.type == e1000_pchlan) && 2139 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || 2140 (hw->mac.type > e1000_pchlan)) { 2141 /* HW configures the SMBus address and LEDs when the 2142 * OEM and LCD Write Enable bits are set in the NVM. 2143 * When both NVM bits are cleared, SW will configure 2144 * them instead. 
2145 */ 2146 ret_val = e1000_write_smbus_addr(hw); 2147 if (ret_val) 2148 goto release; 2149 2150 data = er32(LEDCTL); 2151 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, 2152 (u16)data); 2153 if (ret_val) 2154 goto release; 2155 } 2156 2157 /* Configure LCD from extended configuration region. */ 2158 2159 /* cnf_base_addr is in DWORD */ 2160 word_addr = (u16)(cnf_base_addr << 1); 2161 2162 for (i = 0; i < cnf_size; i++) { 2163 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data); 2164 if (ret_val) 2165 goto release; 2166 2167 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), 2168 1, &reg_addr); 2169 if (ret_val) 2170 goto release; 2171 2172 /* Save off the PHY page for future writes. */ 2173 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { 2174 phy_page = reg_data; 2175 continue; 2176 } 2177 2178 reg_addr &= PHY_REG_MASK; 2179 reg_addr |= phy_page; 2180 2181 ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); 2182 if (ret_val) 2183 goto release; 2184 } 2185 2186 release: 2187 hw->phy.ops.release(hw); 2188 return ret_val; 2189 } 2190 2191 /** 2192 * e1000_k1_gig_workaround_hv - K1 Si workaround 2193 * @hw: pointer to the HW structure 2194 * @link: link up bool flag 2195 * 2196 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning 2197 * from a lower speed. This workaround disables K1 whenever link is at 1Gig. 2198 * If link is down, the function will restore the default K1 setting located 2199 * in the NVM. 2200 **/ 2201 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) 2202 { 2203 s32 ret_val = 0; 2204 u16 status_reg = 0; 2205 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; 2206 2207 if (hw->mac.type != e1000_pchlan) 2208 return 0; 2209 2210 /* Wrap the whole flow with the sw flag */ 2211 ret_val = hw->phy.ops.acquire(hw); 2212 if (ret_val) 2213 return ret_val; 2214 2215 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ 2216 if (link) { 2217 if (hw->phy.type == e1000_phy_82578) { 2218 ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, 2219 &status_reg); 2220 if (ret_val) 2221 goto release; 2222 2223 status_reg &= (BM_CS_STATUS_LINK_UP | 2224 BM_CS_STATUS_RESOLVED | 2225 BM_CS_STATUS_SPEED_MASK); 2226 2227 if (status_reg == (BM_CS_STATUS_LINK_UP | 2228 BM_CS_STATUS_RESOLVED | 2229 BM_CS_STATUS_SPEED_1000)) 2230 k1_enable = false; 2231 } 2232 2233 if (hw->phy.type == e1000_phy_82577) { 2234 ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); 2235 if (ret_val) 2236 goto release; 2237 2238 status_reg &= (HV_M_STATUS_LINK_UP | 2239 HV_M_STATUS_AUTONEG_COMPLETE | 2240 HV_M_STATUS_SPEED_MASK); 2241 2242 if (status_reg == (HV_M_STATUS_LINK_UP | 2243 HV_M_STATUS_AUTONEG_COMPLETE | 2244 HV_M_STATUS_SPEED_1000)) 2245 k1_enable = false; 2246 } 2247 2248 /* Link stall fix for link up */ 2249 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); 2250 if (ret_val) 2251 goto release; 2252 2253 } else { 2254 /* Link stall fix for link down */ 2255 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); 2256 if (ret_val) 2257 goto release; 2258 } 2259 2260 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); 2261 2262 release: 2263 hw->phy.ops.release(hw); 2264 2265 return ret_val; 2266 } 2267 2268 /** 2269 * e1000_configure_k1_ich8lan - Configure K1 power state 2270 * @hw: pointer to the HW structure 2271 * @k1_enable: K1 state to configure 2272 * 2273 * Configure the K1 power state based on the provided parameter. 2274 * Assumes semaphore already acquired.
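 *
 * Illustrative call (a sketch; callers in this file, e.g.
 * e1000_k1_gig_workaround_hv(), invoke it with the PHY semaphore held):
 *
 *	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);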
2275 * 2276 * Success returns 0, Failure returns -E1000_ERR_PHY (-2) 2277 **/ 2278 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) 2279 { 2280 s32 ret_val; 2281 u32 ctrl_reg = 0; 2282 u32 ctrl_ext = 0; 2283 u32 reg = 0; 2284 u16 kmrn_reg = 0; 2285 2286 ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, 2287 &kmrn_reg); 2288 if (ret_val) 2289 return ret_val; 2290 2291 if (k1_enable) 2292 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; 2293 else 2294 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; 2295 2296 ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, 2297 kmrn_reg); 2298 if (ret_val) 2299 return ret_val; 2300 2301 usleep_range(20, 40); 2302 ctrl_ext = er32(CTRL_EXT); 2303 ctrl_reg = er32(CTRL); 2304 2305 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 2306 reg |= E1000_CTRL_FRCSPD; 2307 ew32(CTRL, reg); 2308 2309 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); 2310 e1e_flush(); 2311 usleep_range(20, 40); 2312 ew32(CTRL, ctrl_reg); 2313 ew32(CTRL_EXT, ctrl_ext); 2314 e1e_flush(); 2315 usleep_range(20, 40); 2316 2317 return 0; 2318 } 2319 2320 /** 2321 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration 2322 * @hw: pointer to the HW structure 2323 * @d0_state: true if entering D0, false if entering D3 2324 * 2325 * SW will configure GbE Disable and LPLU based on the NVM. The four bits are 2326 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit 2327 * in the NVM determine whether HW should configure LPLU and GbE Disable. 2328 **/ 2329 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) 2330 { 2331 s32 ret_val = 0; 2332 u32 mac_reg; 2333 u16 oem_reg; 2334 2335 if (hw->mac.type < e1000_pchlan) 2336 return ret_val; 2337 2338 ret_val = hw->phy.ops.acquire(hw); 2339 if (ret_val) 2340 return ret_val; 2341 2342 if (hw->mac.type == e1000_pchlan) { 2343 mac_reg = er32(EXTCNF_CTRL); 2344 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) 2345 goto release; 2346 } 2347 2348 mac_reg = er32(FEXTNVM); 2349 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) 2350 goto release; 2351 2352 mac_reg = er32(PHY_CTRL); 2353 2354 ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); 2355 if (ret_val) 2356 goto release; 2357 2358 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); 2359 2360 if (d0_state) { 2361 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) 2362 oem_reg |= HV_OEM_BITS_GBE_DIS; 2363 2364 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) 2365 oem_reg |= HV_OEM_BITS_LPLU; 2366 } else { 2367 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | 2368 E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) 2369 oem_reg |= HV_OEM_BITS_GBE_DIS; 2370 2371 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | 2372 E1000_PHY_CTRL_NOND0A_LPLU)) 2373 oem_reg |= HV_OEM_BITS_LPLU; 2374 } 2375 2376 /* Set Restart auto-neg to activate the bits */ 2377 if ((d0_state || (hw->mac.type != e1000_pchlan)) && 2378 !hw->phy.ops.check_reset_block(hw)) 2379 oem_reg |= HV_OEM_BITS_RESTART_AN; 2380 2381 ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); 2382 2383 release: 2384 hw->phy.ops.release(hw); 2385 2386 return ret_val; 2387 } 2388 2389 /** 2390 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode 2391 * @hw: pointer to the HW structure 2392 **/ 2393 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) 2394 { 2395 s32 ret_val; 2396 u16 data; 2397 2398 ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); 2399 if (ret_val) 2400 return ret_val; 2401 2402 data |= HV_KMRN_MDIO_SLOW; 2403 2404 ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); 2405 2406
return ret_val; 2407 } 2408 2409 /** 2410 * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be 2411 * done after every PHY reset. * @hw: pointer to the HW structure 2412 **/ 2413 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) 2414 { 2415 s32 ret_val = 0; 2416 u16 phy_data; 2417 2418 if (hw->mac.type != e1000_pchlan) 2419 return 0; 2420 2421 /* Set MDIO slow mode before any other MDIO access */ 2422 if (hw->phy.type == e1000_phy_82577) { 2423 ret_val = e1000_set_mdio_slow_mode_hv(hw); 2424 if (ret_val) 2425 return ret_val; 2426 } 2427 2428 if (((hw->phy.type == e1000_phy_82577) && 2429 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || 2430 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { 2431 /* Disable generation of early preamble */ 2432 ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); 2433 if (ret_val) 2434 return ret_val; 2435 2436 /* Preamble tuning for SSC */ 2437 ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); 2438 if (ret_val) 2439 return ret_val; 2440 } 2441 2442 if (hw->phy.type == e1000_phy_82578) { 2443 /* Return registers to default by doing a soft reset then 2444 * writing 0x3140 to the control register. 2445 */ 2446 if (hw->phy.revision < 2) { 2447 e1000e_phy_sw_reset(hw); 2448 ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); 2449 if (ret_val) 2450 return ret_val; 2451 } 2452 } 2453 2454 /* Select page 0 */ 2455 ret_val = hw->phy.ops.acquire(hw); 2456 if (ret_val) 2457 return ret_val; 2458 2459 hw->phy.addr = 1; 2460 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); 2461 hw->phy.ops.release(hw); 2462 if (ret_val) 2463 return ret_val; 2464 2465 /* Configure the K1 Si workaround during phy reset assuming there is 2466 * link so that it disables K1 if link is in 1Gbps. 2467 */ 2468 ret_val = e1000_k1_gig_workaround_hv(hw, true); 2469 if (ret_val) 2470 return ret_val; 2471 2472 /* Workaround for link disconnects on a busy hub in half duplex */ 2473 ret_val = hw->phy.ops.acquire(hw); 2474 if (ret_val) 2475 return ret_val; 2476 ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); 2477 if (ret_val) 2478 goto release; 2479 ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); 2480 if (ret_val) 2481 goto release; 2482 2483 /* set MSE higher to enable link to stay up when noise is high */ 2484 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); 2485 release: 2486 hw->phy.ops.release(hw); 2487 2488 return ret_val; 2489 } 2490 2491 /** 2492 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY 2493 * @hw: pointer to the HW structure 2494 **/ 2495 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) 2496 { 2497 u32 mac_reg; 2498 u16 i, phy_reg = 0; 2499 s32 ret_val; 2500 2501 ret_val = hw->phy.ops.acquire(hw); 2502 if (ret_val) 2503 return; 2504 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); 2505 if (ret_val) 2506 goto release; 2507 2508 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ 2509 for (i = 0; i < (hw->mac.rar_entry_count); i++) { 2510 mac_reg = er32(RAL(i)); 2511 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), 2512 (u16)(mac_reg & 0xFFFF)); 2513 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), 2514 (u16)((mac_reg >> 16) & 0xFFFF)); 2515 2516 mac_reg = er32(RAH(i)); 2517 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), 2518 (u16)(mac_reg & 0xFFFF)); 2519 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), 2520 (u16)((mac_reg & E1000_RAH_AV) 2521 >> 16)); 2522 } 2523 2524 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); 2525 2526 release: 2527
hw->phy.ops.release(hw); 2528 } 2529 2530 /** 2531 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation 2532 * with 82579 PHY 2533 * @hw: pointer to the HW structure 2534 * @enable: flag to enable/disable workaround when enabling/disabling jumbos 2535 **/ 2536 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) 2537 { 2538 s32 ret_val = 0; 2539 u16 phy_reg, data; 2540 u32 mac_reg; 2541 u16 i; 2542 2543 if (hw->mac.type < e1000_pch2lan) 2544 return 0; 2545 2546 /* disable Rx path while enabling/disabling workaround */ 2547 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); 2548 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14)); 2549 if (ret_val) 2550 return ret_val; 2551 2552 if (enable) { 2553 /* Write Rx addresses (rar_entry_count for RAL/H, and 2554 * SHRAL/H) and initial CRC values to the MAC 2555 */ 2556 for (i = 0; i < hw->mac.rar_entry_count; i++) { 2557 u8 mac_addr[ETH_ALEN] = { 0 }; 2558 u32 addr_high, addr_low; 2559 2560 addr_high = er32(RAH(i)); 2561 if (!(addr_high & E1000_RAH_AV)) 2562 continue; 2563 addr_low = er32(RAL(i)); 2564 mac_addr[0] = (addr_low & 0xFF); 2565 mac_addr[1] = ((addr_low >> 8) & 0xFF); 2566 mac_addr[2] = ((addr_low >> 16) & 0xFF); 2567 mac_addr[3] = ((addr_low >> 24) & 0xFF); 2568 mac_addr[4] = (addr_high & 0xFF); 2569 mac_addr[5] = ((addr_high >> 8) & 0xFF); 2570 2571 ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); 2572 } 2573 2574 /* Write Rx addresses to the PHY */ 2575 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 2576 2577 /* Enable jumbo frame workaround in the MAC */ 2578 mac_reg = er32(FFLT_DBG); 2579 mac_reg &= ~BIT(14); 2580 mac_reg |= (7 << 15); 2581 ew32(FFLT_DBG, mac_reg); 2582 2583 mac_reg = er32(RCTL); 2584 mac_reg |= E1000_RCTL_SECRC; 2585 ew32(RCTL, mac_reg); 2586 2587 ret_val = e1000e_read_kmrn_reg(hw, 2588 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2589 &data); 2590 if (ret_val) 2591 return ret_val; 2592 ret_val = e1000e_write_kmrn_reg(hw, 2593 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2594 data | BIT(0)); 2595 if (ret_val) 2596 return ret_val; 2597 ret_val = e1000e_read_kmrn_reg(hw, 2598 E1000_KMRNCTRLSTA_HD_CTRL, 2599 &data); 2600 if (ret_val) 2601 return ret_val; 2602 data &= ~(0xF << 8); 2603 data |= (0xB << 8); 2604 ret_val = e1000e_write_kmrn_reg(hw, 2605 E1000_KMRNCTRLSTA_HD_CTRL, 2606 data); 2607 if (ret_val) 2608 return ret_val; 2609 2610 /* Enable jumbo frame workaround in the PHY */ 2611 e1e_rphy(hw, PHY_REG(769, 23), &data); 2612 data &= ~(0x7F << 5); 2613 data |= (0x37 << 5); 2614 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 2615 if (ret_val) 2616 return ret_val; 2617 e1e_rphy(hw, PHY_REG(769, 16), &data); 2618 data &= ~BIT(13); 2619 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 2620 if (ret_val) 2621 return ret_val; 2622 e1e_rphy(hw, PHY_REG(776, 20), &data); 2623 data &= ~(0x3FF << 2); 2624 data |= (E1000_TX_PTR_GAP << 2); 2625 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); 2626 if (ret_val) 2627 return ret_val; 2628 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); 2629 if (ret_val) 2630 return ret_val; 2631 e1e_rphy(hw, HV_PM_CTRL, &data); 2632 ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10)); 2633 if (ret_val) 2634 return ret_val; 2635 } else { 2636 /* Write MAC register values back to h/w defaults */ 2637 mac_reg = er32(FFLT_DBG); 2638 mac_reg &= ~(0xF << 14); 2639 ew32(FFLT_DBG, mac_reg); 2640 2641 mac_reg = er32(RCTL); 2642 mac_reg &= ~E1000_RCTL_SECRC; 2643 ew32(RCTL, mac_reg); 2644 2645 ret_val = e1000e_read_kmrn_reg(hw, 2646 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2647 &data); 2648 if (ret_val) 
2649 return ret_val; 2650 ret_val = e1000e_write_kmrn_reg(hw, 2651 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2652 data & ~BIT(0)); 2653 if (ret_val) 2654 return ret_val; 2655 ret_val = e1000e_read_kmrn_reg(hw, 2656 E1000_KMRNCTRLSTA_HD_CTRL, 2657 &data); 2658 if (ret_val) 2659 return ret_val; 2660 data &= ~(0xF << 8); 2661 data |= (0xB << 8); 2662 ret_val = e1000e_write_kmrn_reg(hw, 2663 E1000_KMRNCTRLSTA_HD_CTRL, 2664 data); 2665 if (ret_val) 2666 return ret_val; 2667 2668 /* Write PHY register values back to h/w defaults */ 2669 e1e_rphy(hw, PHY_REG(769, 23), &data); 2670 data &= ~(0x7F << 5); 2671 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 2672 if (ret_val) 2673 return ret_val; 2674 e1e_rphy(hw, PHY_REG(769, 16), &data); 2675 data |= BIT(13); 2676 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 2677 if (ret_val) 2678 return ret_val; 2679 e1e_rphy(hw, PHY_REG(776, 20), &data); 2680 data &= ~(0x3FF << 2); 2681 data |= (0x8 << 2); 2682 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); 2683 if (ret_val) 2684 return ret_val; 2685 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); 2686 if (ret_val) 2687 return ret_val; 2688 e1e_rphy(hw, HV_PM_CTRL, &data); 2689 ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10)); 2690 if (ret_val) 2691 return ret_val; 2692 } 2693 2694 /* re-enable Rx path after enabling/disabling workaround */ 2695 return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14)); 2696 } 2697 2698 /** 2699 * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be 2700 * done after every PHY reset. * @hw: pointer to the HW structure 2701 **/ 2702 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) 2703 { 2704 s32 ret_val = 0; 2705 2706 if (hw->mac.type != e1000_pch2lan) 2707 return 0; 2708 2709 /* Set MDIO slow mode before any other MDIO access */ 2710 ret_val = e1000_set_mdio_slow_mode_hv(hw); 2711 if (ret_val) 2712 return ret_val; 2713 2714 ret_val = hw->phy.ops.acquire(hw); 2715 if (ret_val) 2716 return ret_val; 2717 /* set MSE higher to enable link to stay up when noise is high */ 2718 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); 2719 if (ret_val) 2720 goto release; 2721 /* drop link after 5 times MSE threshold was reached */ 2722 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); 2723 release: 2724 hw->phy.ops.release(hw); 2725 2726 return ret_val; 2727 } 2728 2729 /** 2730 * e1000_k1_workaround_lv - K1 Si workaround 2731 * @hw: pointer to the HW structure 2732 * 2733 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps. 2734 * Disable K1 in 1000Mbps and 100Mbps. 2735 **/ 2736 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 2737 { 2738 s32 ret_val = 0; 2739 u16 status_reg = 0; 2740 2741 if (hw->mac.type != e1000_pch2lan) 2742 return 0; 2743 2744 /* Set K1 beacon duration based on 10Mbps speed */ 2745 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); 2746 if (ret_val) 2747 return ret_val; 2748 2749 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 2750 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 2751 if (status_reg & 2752 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { 2753 u16 pm_phy_reg; 2754 2755 /* LV 1G/100 Packet drop issue wa */ 2756 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); 2757 if (ret_val) 2758 return ret_val; 2759 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; 2760 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); 2761 if (ret_val) 2762 return ret_val; 2763 } else { 2764 u32 mac_reg; 2765 2766 mac_reg = er32(FEXTNVM4); 2767 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 2768
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 2769 ew32(FEXTNVM4, mac_reg); 2770 } 2771 } 2772 2773 return ret_val; 2774 } 2775 2776 /** 2777 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware 2778 * @hw: pointer to the HW structure 2779 * @gate: boolean set to true to gate, false to ungate 2780 * 2781 * Gate/ungate the automatic PHY configuration via hardware; perform 2782 * the configuration via software instead. 2783 **/ 2784 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) 2785 { 2786 u32 extcnf_ctrl; 2787 2788 if (hw->mac.type < e1000_pch2lan) 2789 return; 2790 2791 extcnf_ctrl = er32(EXTCNF_CTRL); 2792 2793 if (gate) 2794 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; 2795 else 2796 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; 2797 2798 ew32(EXTCNF_CTRL, extcnf_ctrl); 2799 } 2800 2801 /** 2802 * e1000_lan_init_done_ich8lan - Check for PHY config completion 2803 * @hw: pointer to the HW structure 2804 * 2805 * Check the appropriate indication the MAC has finished configuring the 2806 * PHY after a software reset. 2807 **/ 2808 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) 2809 { 2810 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; 2811 2812 /* Wait for basic configuration to complete before proceeding */ 2813 do { 2814 data = er32(STATUS); 2815 data &= E1000_STATUS_LAN_INIT_DONE; 2816 usleep_range(100, 200); 2817 } while ((!data) && --loop); 2818 2819 /* If basic configuration is incomplete before the above loop 2820 * count reaches 0, loading the configuration from NVM will 2821 * leave the PHY in a bad state possibly resulting in no link. 2822 */ 2823 if (loop == 0) 2824 e_dbg("LAN_INIT_DONE not set, increase timeout\n"); 2825 2826 /* Clear the Init Done bit for the next init event */ 2827 data = er32(STATUS); 2828 data &= ~E1000_STATUS_LAN_INIT_DONE; 2829 ew32(STATUS, data); 2830 } 2831 2832 /** 2833 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset 2834 * @hw: pointer to the HW structure 2835 **/ 2836 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) 2837 { 2838 s32 ret_val = 0; 2839 u16 reg; 2840 2841 if (hw->phy.ops.check_reset_block(hw)) 2842 return 0; 2843 2844 /* Allow time for h/w to get to quiescent state after reset */ 2845 usleep_range(10000, 11000); 2846 2847 /* Perform any necessary post-reset workarounds */ 2848 switch (hw->mac.type) { 2849 case e1000_pchlan: 2850 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 2851 if (ret_val) 2852 return ret_val; 2853 break; 2854 case e1000_pch2lan: 2855 ret_val = e1000_lv_phy_workarounds_ich8lan(hw); 2856 if (ret_val) 2857 return ret_val; 2858 break; 2859 default: 2860 break; 2861 } 2862 2863 /* Clear the host wakeup bit after lcd reset */ 2864 if (hw->mac.type >= e1000_pchlan) { 2865 e1e_rphy(hw, BM_PORT_GEN_CFG, &reg); 2866 reg &= ~BM_WUC_HOST_WU_BIT; 2867 e1e_wphy(hw, BM_PORT_GEN_CFG, reg); 2868 } 2869 2870 /* Configure the LCD with the extended configuration region in NVM */ 2871 ret_val = e1000_sw_lcd_config_ich8lan(hw); 2872 if (ret_val) 2873 return ret_val; 2874 2875 /* Configure the LCD with the OEM bits in NVM */ 2876 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 2877 2878 if (hw->mac.type == e1000_pch2lan) { 2879 /* Ungate automatic PHY configuration on non-managed 82579 */ 2880 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 2881 usleep_range(10000, 11000); 2882 e1000_gate_hw_phy_config_ich8lan(hw, false); 2883 } 2884 2885 /* Set EEE LPI Update Timer to 200usec */ 2886 ret_val = hw->phy.ops.acquire(hw); 2887 if (ret_val)
2888 return ret_val; 2889 ret_val = e1000_write_emi_reg_locked(hw, 2890 I82579_LPI_UPDATE_TIMER, 2891 0x1387); 2892 hw->phy.ops.release(hw); 2893 } 2894 2895 return ret_val; 2896 } 2897 2898 /** 2899 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset 2900 * @hw: pointer to the HW structure 2901 * 2902 * Resets the PHY 2903 * This is a function pointer entry point called by drivers 2904 * or other shared routines. 2905 **/ 2906 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 2907 { 2908 s32 ret_val = 0; 2909 2910 /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 2911 if ((hw->mac.type == e1000_pch2lan) && 2912 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 2913 e1000_gate_hw_phy_config_ich8lan(hw, true); 2914 2915 ret_val = e1000e_phy_hw_reset_generic(hw); 2916 if (ret_val) 2917 return ret_val; 2918 2919 return e1000_post_phy_reset_ich8lan(hw); 2920 } 2921 2922 /** 2923 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state 2924 * @hw: pointer to the HW structure 2925 * @active: true to enable LPLU, false to disable 2926 * 2927 * Sets the LPLU state according to the active flag. For PCH, if the OEM write 2928 * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set 2929 * the PHY speed. This function will manually set the LPLU bit and restart 2930 * auto-neg as hw would do. D3 and D0 LPLU will call the same function 2931 * since it configures the same bit. 2932 **/ 2933 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) 2934 { 2935 s32 ret_val; 2936 u16 oem_reg; 2937 2938 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); 2939 if (ret_val) 2940 return ret_val; 2941 2942 if (active) 2943 oem_reg |= HV_OEM_BITS_LPLU; 2944 else 2945 oem_reg &= ~HV_OEM_BITS_LPLU; 2946 2947 if (!hw->phy.ops.check_reset_block(hw)) 2948 oem_reg |= HV_OEM_BITS_RESTART_AN; 2949 2950 return e1e_wphy(hw, HV_OEM_BITS, oem_reg); 2951 } 2952 2953 /** 2954 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state 2955 * @hw: pointer to the HW structure 2956 * @active: true to enable LPLU, false to disable 2957 * 2958 * Sets the LPLU D0 state according to the active flag. When 2959 * activating LPLU this function also disables smart speed 2960 * and vice versa. LPLU will not be activated unless the 2961 * device autonegotiation advertisement meets standards of 2962 * either 10 or 10/100 or 10/100/1000 at all duplexes. 2963 * This is a function pointer entry point only called by 2964 * PHY setup routines.
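 *
 * Illustrative invocation through the PHY ops table (a sketch; whether
 * this pointer is populated depends on the PHY init done elsewhere):
 *
 *	if (hw->phy.ops.set_d0_lplu_state)
 *		ret_val = hw->phy.ops.set_d0_lplu_state(hw, true);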
2965 **/ 2966 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 2967 { 2968 struct e1000_phy_info *phy = &hw->phy; 2969 u32 phy_ctrl; 2970 s32 ret_val = 0; 2971 u16 data; 2972 2973 if (phy->type == e1000_phy_ife) 2974 return 0; 2975 2976 phy_ctrl = er32(PHY_CTRL); 2977 2978 if (active) { 2979 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 2980 ew32(PHY_CTRL, phy_ctrl); 2981 2982 if (phy->type != e1000_phy_igp_3) 2983 return 0; 2984 2985 /* Call gig speed drop workaround on LPLU before accessing 2986 * any PHY registers 2987 */ 2988 if (hw->mac.type == e1000_ich8lan) 2989 e1000e_gig_downshift_workaround_ich8lan(hw); 2990 2991 /* When LPLU is enabled, we should disable SmartSpeed */ 2992 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); 2993 if (ret_val) 2994 return ret_val; 2995 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 2996 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); 2997 if (ret_val) 2998 return ret_val; 2999 } else { 3000 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 3001 ew32(PHY_CTRL, phy_ctrl); 3002 3003 if (phy->type != e1000_phy_igp_3) 3004 return 0; 3005 3006 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 3007 * during Dx states where the power conservation is most 3008 * important. During driver activity we should enable 3009 * SmartSpeed, so performance is maintained. 3010 */ 3011 if (phy->smart_speed == e1000_smart_speed_on) { 3012 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3013 &data); 3014 if (ret_val) 3015 return ret_val; 3016 3017 data |= IGP01E1000_PSCFR_SMART_SPEED; 3018 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3019 data); 3020 if (ret_val) 3021 return ret_val; 3022 } else if (phy->smart_speed == e1000_smart_speed_off) { 3023 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3024 &data); 3025 if (ret_val) 3026 return ret_val; 3027 3028 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 3029 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3030 data); 3031 if (ret_val) 3032 return ret_val; 3033 } 3034 } 3035 3036 return 0; 3037 } 3038 3039 /** 3040 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state 3041 * @hw: pointer to the HW structure 3042 * @active: true to enable LPLU, false to disable 3043 * 3044 * Sets the LPLU D3 state according to the active flag. When 3045 * activating LPLU this function also disables smart speed 3046 * and vice versa. LPLU will not be activated unless the 3047 * device autonegotiation advertisement meets standards of 3048 * either 10 or 10/100 or 10/100/1000 at all duplexes. 3049 * This is a function pointer entry point only called by 3050 * PHY setup routines. 3051 **/ 3052 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 3053 { 3054 struct e1000_phy_info *phy = &hw->phy; 3055 u32 phy_ctrl; 3056 s32 ret_val = 0; 3057 u16 data; 3058 3059 phy_ctrl = er32(PHY_CTRL); 3060 3061 if (!active) { 3062 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 3063 ew32(PHY_CTRL, phy_ctrl); 3064 3065 if (phy->type != e1000_phy_igp_3) 3066 return 0; 3067 3068 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 3069 * during Dx states where the power conservation is most 3070 * important. During driver activity we should enable 3071 * SmartSpeed, so performance is maintained. 
3072 */ 3073 if (phy->smart_speed == e1000_smart_speed_on) { 3074 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3075 &data); 3076 if (ret_val) 3077 return ret_val; 3078 3079 data |= IGP01E1000_PSCFR_SMART_SPEED; 3080 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3081 data); 3082 if (ret_val) 3083 return ret_val; 3084 } else if (phy->smart_speed == e1000_smart_speed_off) { 3085 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3086 &data); 3087 if (ret_val) 3088 return ret_val; 3089 3090 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 3091 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 3092 data); 3093 if (ret_val) 3094 return ret_val; 3095 } 3096 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || 3097 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 3098 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 3099 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 3100 ew32(PHY_CTRL, phy_ctrl); 3101 3102 if (phy->type != e1000_phy_igp_3) 3103 return 0; 3104 3105 /* Call gig speed drop workaround on LPLU before accessing 3106 * any PHY registers 3107 */ 3108 if (hw->mac.type == e1000_ich8lan) 3109 e1000e_gig_downshift_workaround_ich8lan(hw); 3110 3111 /* When LPLU is enabled, we should disable SmartSpeed */ 3112 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); 3113 if (ret_val) 3114 return ret_val; 3115 3116 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 3117 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); 3118 } 3119 3120 return ret_val; 3121 } 3122 3123 /** 3124 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 3125 * @hw: pointer to the HW structure 3126 * @bank: pointer to the variable that returns the active bank 3127 * 3128 * Reads signature byte from the NVM using the flash access registers. 3129 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
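 *
 * Illustrative caller pattern (a sketch mirroring the NVM read routines
 * below, which fall back to bank 0 when detection fails):
 *
 *	u32 bank = 0;
 *	if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank))
 *		bank = 0;
 *	act_offset = bank ? nvm->flash_bank_size : 0;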
3130 **/ 3131 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) 3132 { 3133 u32 eecd; 3134 struct e1000_nvm_info *nvm = &hw->nvm; 3135 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); 3136 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; 3137 u32 nvm_dword = 0; 3138 u8 sig_byte = 0; 3139 s32 ret_val; 3140 3141 switch (hw->mac.type) { 3142 case e1000_pch_spt: 3143 case e1000_pch_cnp: 3144 case e1000_pch_tgp: 3145 case e1000_pch_adp: 3146 bank1_offset = nvm->flash_bank_size; 3147 act_offset = E1000_ICH_NVM_SIG_WORD; 3148 3149 /* set bank to 0 in case flash read fails */ 3150 *bank = 0; 3151 3152 /* Check bank 0 */ 3153 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, 3154 &nvm_dword); 3155 if (ret_val) 3156 return ret_val; 3157 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); 3158 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3159 E1000_ICH_NVM_SIG_VALUE) { 3160 *bank = 0; 3161 return 0; 3162 } 3163 3164 /* Check bank 1 */ 3165 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + 3166 bank1_offset, 3167 &nvm_dword); 3168 if (ret_val) 3169 return ret_val; 3170 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); 3171 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3172 E1000_ICH_NVM_SIG_VALUE) { 3173 *bank = 1; 3174 return 0; 3175 } 3176 3177 e_dbg("ERROR: No valid NVM bank present\n"); 3178 return -E1000_ERR_NVM; 3179 case e1000_ich8lan: 3180 case e1000_ich9lan: 3181 eecd = er32(EECD); 3182 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == 3183 E1000_EECD_SEC1VAL_VALID_MASK) { 3184 if (eecd & E1000_EECD_SEC1VAL) 3185 *bank = 1; 3186 else 3187 *bank = 0; 3188 3189 return 0; 3190 } 3191 e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); 3192 /* fall-thru */ 3193 default: 3194 /* set bank to 0 in case flash read fails */ 3195 *bank = 0; 3196 3197 /* Check bank 0 */ 3198 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, 3199 &sig_byte); 3200 if (ret_val) 3201 return ret_val; 3202 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3203 E1000_ICH_NVM_SIG_VALUE) { 3204 *bank = 0; 3205 return 0; 3206 } 3207 3208 /* Check bank 1 */ 3209 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + 3210 bank1_offset, 3211 &sig_byte); 3212 if (ret_val) 3213 return ret_val; 3214 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3215 E1000_ICH_NVM_SIG_VALUE) { 3216 *bank = 1; 3217 return 0; 3218 } 3219 3220 e_dbg("ERROR: No valid NVM bank present\n"); 3221 return -E1000_ERR_NVM; 3222 } 3223 } 3224 3225 /** 3226 * e1000_read_nvm_spt - NVM access for SPT 3227 * @hw: pointer to the HW structure 3228 * @offset: The offset (in bytes) of the word(s) to read. 3229 * @words: Size of data to read in words. 3230 * @data: pointer to the word(s) to read at offset. 
3231 * 3232 * Reads a word(s) from the NVM 3233 **/ 3234 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, 3235 u16 *data) 3236 { 3237 struct e1000_nvm_info *nvm = &hw->nvm; 3238 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3239 u32 act_offset; 3240 s32 ret_val = 0; 3241 u32 bank = 0; 3242 u32 dword = 0; 3243 u16 offset_to_read; 3244 u16 i; 3245 3246 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 3247 (words == 0)) { 3248 e_dbg("nvm parameter(s) out of bounds\n"); 3249 ret_val = -E1000_ERR_NVM; 3250 goto out; 3251 } 3252 3253 nvm->ops.acquire(hw); 3254 3255 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 3256 if (ret_val) { 3257 e_dbg("Could not detect valid bank, assuming bank 0\n"); 3258 bank = 0; 3259 } 3260 3261 act_offset = (bank) ? nvm->flash_bank_size : 0; 3262 act_offset += offset; 3263 3264 ret_val = 0; 3265 3266 for (i = 0; i < words; i += 2) { 3267 if (words - i == 1) { 3268 if (dev_spec->shadow_ram[offset + i].modified) { 3269 data[i] = 3270 dev_spec->shadow_ram[offset + i].value; 3271 } else { 3272 offset_to_read = act_offset + i - 3273 ((act_offset + i) % 2); 3274 ret_val = 3275 e1000_read_flash_dword_ich8lan(hw, 3276 offset_to_read, 3277 &dword); 3278 if (ret_val) 3279 break; 3280 if ((act_offset + i) % 2 == 0) 3281 data[i] = (u16)(dword & 0xFFFF); 3282 else 3283 data[i] = (u16)((dword >> 16) & 0xFFFF); 3284 } 3285 } else { 3286 offset_to_read = act_offset + i; 3287 if (!(dev_spec->shadow_ram[offset + i].modified) || 3288 !(dev_spec->shadow_ram[offset + i + 1].modified)) { 3289 ret_val = 3290 e1000_read_flash_dword_ich8lan(hw, 3291 offset_to_read, 3292 &dword); 3293 if (ret_val) 3294 break; 3295 } 3296 if (dev_spec->shadow_ram[offset + i].modified) 3297 data[i] = 3298 dev_spec->shadow_ram[offset + i].value; 3299 else 3300 data[i] = (u16)(dword & 0xFFFF); 3301 if (dev_spec->shadow_ram[offset + i].modified) 3302 data[i + 1] = 3303 dev_spec->shadow_ram[offset + i + 1].value; 3304 else 3305 data[i + 1] = (u16)(dword >> 16 & 0xFFFF); 3306 } 3307 } 3308 3309 nvm->ops.release(hw); 3310 3311 out: 3312 if (ret_val) 3313 e_dbg("NVM read error: %d\n", ret_val); 3314 3315 return ret_val; 3316 } 3317 3318 /** 3319 * e1000_read_nvm_ich8lan - Read word(s) from the NVM 3320 * @hw: pointer to the HW structure 3321 * @offset: The offset (in bytes) of the word(s) to read. 3322 * @words: Size of data to read in words 3323 * @data: Pointer to the word(s) to read at offset. 3324 * 3325 * Reads a word(s) from the NVM using the flash access registers. 3326 **/ 3327 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, 3328 u16 *data) 3329 { 3330 struct e1000_nvm_info *nvm = &hw->nvm; 3331 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3332 u32 act_offset; 3333 s32 ret_val = 0; 3334 u32 bank = 0; 3335 u16 i, word; 3336 3337 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 3338 (words == 0)) { 3339 e_dbg("nvm parameter(s) out of bounds\n"); 3340 ret_val = -E1000_ERR_NVM; 3341 goto out; 3342 } 3343 3344 nvm->ops.acquire(hw); 3345 3346 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 3347 if (ret_val) { 3348 e_dbg("Could not detect valid bank, assuming bank 0\n"); 3349 bank = 0; 3350 } 3351 3352 act_offset = (bank) ? 
nvm->flash_bank_size : 0; 3353 act_offset += offset; 3354 3355 ret_val = 0; 3356 for (i = 0; i < words; i++) { 3357 if (dev_spec->shadow_ram[offset + i].modified) { 3358 data[i] = dev_spec->shadow_ram[offset + i].value; 3359 } else { 3360 ret_val = e1000_read_flash_word_ich8lan(hw, 3361 act_offset + i, 3362 &word); 3363 if (ret_val) 3364 break; 3365 data[i] = word; 3366 } 3367 } 3368 3369 nvm->ops.release(hw); 3370 3371 out: 3372 if (ret_val) 3373 e_dbg("NVM read error: %d\n", ret_val); 3374 3375 return ret_val; 3376 } 3377 3378 /** 3379 * e1000_flash_cycle_init_ich8lan - Initialize flash 3380 * @hw: pointer to the HW structure 3381 * 3382 * This function does initial flash setup so that a new read/write/erase cycle 3383 * can be started. 3384 **/ 3385 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) 3386 { 3387 union ich8_hws_flash_status hsfsts; 3388 s32 ret_val = -E1000_ERR_NVM; 3389 3390 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3391 3392 /* Check if the flash descriptor is valid */ 3393 if (!hsfsts.hsf_status.fldesvalid) { 3394 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); 3395 return -E1000_ERR_NVM; 3396 } 3397 3398 /* Clear FCERR and DAEL in hw status by writing 1 */ 3399 hsfsts.hsf_status.flcerr = 1; 3400 hsfsts.hsf_status.dael = 1; 3401 if (hw->mac.type >= e1000_pch_spt) 3402 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); 3403 else 3404 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 3405 3406 /* Either we should have a hardware SPI cycle in progress 3407 * bit to check against, in order to start a new cycle or 3408 * FDONE bit should be changed in the hardware so that it 3409 * is 1 after hardware reset, which can then be used as an 3410 * indication whether a cycle is in progress or has been 3411 * completed. 3412 */ 3413 3414 if (!hsfsts.hsf_status.flcinprog) { 3415 /* There is no cycle running at present, 3416 * so we can start a cycle. 3417 * Begin by setting Flash Cycle Done. 3418 */ 3419 hsfsts.hsf_status.flcdone = 1; 3420 if (hw->mac.type >= e1000_pch_spt) 3421 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); 3422 else 3423 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 3424 ret_val = 0; 3425 } else { 3426 s32 i; 3427 3428 /* Otherwise poll for some time so the current 3429 * cycle has a chance to end before giving up. 3430 */ 3431 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 3432 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3433 if (!hsfsts.hsf_status.flcinprog) { 3434 ret_val = 0; 3435 break; 3436 } 3437 udelay(1); 3438 } 3439 if (!ret_val) { 3440 /* Successfully waited for the previous cycle to 3441 * finish, now set the Flash Cycle Done. 3442 */ 3443 hsfsts.hsf_status.flcdone = 1; 3444 if (hw->mac.type >= e1000_pch_spt) 3445 ew32flash(ICH_FLASH_HSFSTS, 3446 hsfsts.regval & 0xFFFF); 3447 else 3448 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 3449 } else { 3450 e_dbg("Flash controller busy, cannot get access\n"); 3451 } 3452 } 3453 3454 return ret_val; 3455 } 3456 3457 /** 3458 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) 3459 * @hw: pointer to the HW structure 3460 * @timeout: maximum time to wait for completion 3461 * 3462 * This function starts a flash cycle and waits for its completion.
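 *
 * Illustrative sequence (a sketch of the read path implemented later in
 * this file: init the cycle, program cycle type and address, start it):
 *
 *	ret_val = e1000_flash_cycle_init_ich8lan(hw);
 *	if (ret_val)
 *		return ret_val;
 *	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
 *	hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
 *	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 *	ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
 *	ret_val = e1000_flash_cycle_ich8lan(hw,
 *					    ICH_FLASH_READ_COMMAND_TIMEOUT);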
3463 **/ 3464 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) 3465 { 3466 union ich8_hws_flash_ctrl hsflctl; 3467 union ich8_hws_flash_status hsfsts; 3468 u32 i = 0; 3469 3470 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 3471 if (hw->mac.type >= e1000_pch_spt) 3472 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; 3473 else 3474 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 3475 hsflctl.hsf_ctrl.flcgo = 1; 3476 3477 if (hw->mac.type >= e1000_pch_spt) 3478 ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); 3479 else 3480 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 3481 3482 /* wait until FDONE bit is set to 1 */ 3483 do { 3484 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3485 if (hsfsts.hsf_status.flcdone) 3486 break; 3487 udelay(1); 3488 } while (i++ < timeout); 3489 3490 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) 3491 return 0; 3492 3493 return -E1000_ERR_NVM; 3494 } 3495 3496 /** 3497 * e1000_read_flash_dword_ich8lan - Read dword from flash 3498 * @hw: pointer to the HW structure 3499 * @offset: offset to data location 3500 * @data: pointer to the location for storing the data 3501 * 3502 * Reads the flash dword at offset into data. Offset is converted 3503 * to bytes before read. 3504 **/ 3505 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, 3506 u32 *data) 3507 { 3508 /* Must convert word offset into bytes. */ 3509 offset <<= 1; 3510 return e1000_read_flash_data32_ich8lan(hw, offset, data); 3511 } 3512 3513 /** 3514 * e1000_read_flash_word_ich8lan - Read word from flash 3515 * @hw: pointer to the HW structure 3516 * @offset: offset to data location 3517 * @data: pointer to the location for storing the data 3518 * 3519 * Reads the flash word at offset into data. Offset is converted 3520 * to bytes before read. 3521 **/ 3522 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, 3523 u16 *data) 3524 { 3525 /* Must convert offset into bytes. */ 3526 offset <<= 1; 3527 3528 return e1000_read_flash_data_ich8lan(hw, offset, 2, data); 3529 } 3530 3531 /** 3532 * e1000_read_flash_byte_ich8lan - Read byte from flash 3533 * @hw: pointer to the HW structure 3534 * @offset: The offset of the byte to read. 3535 * @data: Pointer to a byte to store the value read. 3536 * 3537 * Reads a single byte from the NVM using the flash access registers. 3538 **/ 3539 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 3540 u8 *data) 3541 { 3542 s32 ret_val; 3543 u16 word = 0; 3544 3545 /* In SPT, only 32-bit access is supported, 3546 * so this function should not be called. 3547 */ 3548 if (hw->mac.type >= e1000_pch_spt) 3549 return -E1000_ERR_NVM; 3550 else 3551 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); 3552 3553 if (ret_val) 3554 return ret_val; 3555 3556 *data = (u8)word; 3557 3558 return 0; 3559 } 3560 3561 /** 3562 * e1000_read_flash_data_ich8lan - Read byte or word from NVM 3563 * @hw: pointer to the HW structure 3564 * @offset: The offset (in bytes) of the byte or word to read. 3565 * @size: Size of data to read, 1=byte 2=word 3566 * @data: Pointer to the word to store the value read. 3567 * 3568 * Reads a byte or word from the NVM using the flash access registers.
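 *
 * Illustrative call (a sketch; byte_offset is a hypothetical byte offset,
 * unlike the word offsets taken by the NVM-level readers above):
 *
 *	u16 word;
 *	ret_val = e1000_read_flash_data_ich8lan(hw, byte_offset, 2, &word);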
3569 **/ 3570 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 3571 u8 size, u16 *data) 3572 { 3573 union ich8_hws_flash_status hsfsts; 3574 union ich8_hws_flash_ctrl hsflctl; 3575 u32 flash_linear_addr; 3576 u32 flash_data = 0; 3577 s32 ret_val = -E1000_ERR_NVM; 3578 u8 count = 0; 3579 3580 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 3581 return -E1000_ERR_NVM; 3582 3583 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 3584 hw->nvm.flash_base_addr); 3585 3586 do { 3587 udelay(1); 3588 /* Steps */ 3589 ret_val = e1000_flash_cycle_init_ich8lan(hw); 3590 if (ret_val) 3591 break; 3592 3593 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 3594 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 3595 hsflctl.hsf_ctrl.fldbcount = size - 1; 3596 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; 3597 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 3598 3599 ew32flash(ICH_FLASH_FADDR, flash_linear_addr); 3600 3601 ret_val = 3602 e1000_flash_cycle_ich8lan(hw, 3603 ICH_FLASH_READ_COMMAND_TIMEOUT); 3604 3605 /* Check if FCERR is set to 1. If set to 1, clear it 3606 * and try the whole sequence a few more times; else 3607 * read in (shift in) the Flash Data0, least 3608 * significant byte first. 3609 */ 3610 if (!ret_val) { 3611 flash_data = er32flash(ICH_FLASH_FDATA0); 3612 if (size == 1) 3613 *data = (u8)(flash_data & 0x000000FF); 3614 else if (size == 2) 3615 *data = (u16)(flash_data & 0x0000FFFF); 3616 break; 3617 } else { 3618 /* If we've gotten here, then things are probably 3619 * completely hosed, but if the error condition is 3620 * detected, it won't hurt to give it another try... 3621 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 3622 */ 3623 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3624 if (hsfsts.hsf_status.flcerr) { 3625 /* Repeat for some time before giving up. */ 3626 continue; 3627 } else if (!hsfsts.hsf_status.flcdone) { 3628 e_dbg("Timeout error - flash cycle did not complete.\n"); 3629 break; 3630 } 3631 } 3632 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 3633 3634 return ret_val; 3635 } 3636 3637 /** 3638 * e1000_read_flash_data32_ich8lan - Read dword from NVM 3639 * @hw: pointer to the HW structure 3640 * @offset: The offset (in bytes) of the dword to read. 3641 * @data: Pointer to the dword to store the value read. 3642 * 3643 * Reads a dword from the NVM using the flash access registers. 3644 **/ 3645 3646 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, 3647 u32 *data) 3648 { 3649 union ich8_hws_flash_status hsfsts; 3650 union ich8_hws_flash_ctrl hsflctl; 3651 u32 flash_linear_addr; 3652 s32 ret_val = -E1000_ERR_NVM; 3653 u8 count = 0; 3654 3655 if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt) 3656 return -E1000_ERR_NVM; 3657 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 3658 hw->nvm.flash_base_addr); 3659 3660 do { 3661 udelay(1); 3662 /* Steps */ 3663 ret_val = e1000_flash_cycle_init_ich8lan(hw); 3664 if (ret_val) 3665 break; 3666 /* In SPT, this register is in LAN memory space, not flash. 3667 * Therefore, only 32-bit access is supported 3668 */ 3669 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; 3670 3671 /* fldbcount of 11b selects a 4-byte (dword) transfer */ 3672 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; 3673 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; 3674 /* In SPT, this register is in LAN memory space, not flash.
3675 * Therefore, only 32-bit access is supported 3676 */ 3677 ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); 3678 ew32flash(ICH_FLASH_FADDR, flash_linear_addr); 3679 3680 ret_val = 3681 e1000_flash_cycle_ich8lan(hw, 3682 ICH_FLASH_READ_COMMAND_TIMEOUT); 3683 3684 /* Check if FCERR is set to 1. If set to 1, clear it 3685 * and try the whole sequence a few more times; else 3686 * read in (shift in) the Flash Data0, least 3687 * significant byte first. 3688 */ 3689 if (!ret_val) { 3690 *data = er32flash(ICH_FLASH_FDATA0); 3691 break; 3692 } else { 3693 /* If we've gotten here, then things are probably 3694 * completely hosed, but if the error condition is 3695 * detected, it won't hurt to give it another try... 3696 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 3697 */ 3698 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3699 if (hsfsts.hsf_status.flcerr) { 3700 /* Repeat for some time before giving up. */ 3701 continue; 3702 } else if (!hsfsts.hsf_status.flcdone) { 3703 e_dbg("Timeout error - flash cycle did not complete.\n"); 3704 break; 3705 } 3706 } 3707 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 3708 3709 return ret_val; 3710 } 3711 3712 /** 3713 * e1000_write_nvm_ich8lan - Write word(s) to the NVM 3714 * @hw: pointer to the HW structure 3715 * @offset: The offset (in bytes) of the word(s) to write. 3716 * @words: Size of data to write in words 3717 * @data: Pointer to the word(s) to write at offset. 3718 * 3719 * Stages the word(s) in the shadow RAM; the data is committed to the NVM * when the checksum is next updated. 3720 **/ 3721 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, 3722 u16 *data) 3723 { 3724 struct e1000_nvm_info *nvm = &hw->nvm; 3725 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3726 u16 i; 3727 3728 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 3729 (words == 0)) { 3730 e_dbg("nvm parameter(s) out of bounds\n"); 3731 return -E1000_ERR_NVM; 3732 } 3733 3734 nvm->ops.acquire(hw); 3735 3736 for (i = 0; i < words; i++) { 3737 dev_spec->shadow_ram[offset + i].modified = true; 3738 dev_spec->shadow_ram[offset + i].value = data[i]; 3739 } 3740 3741 nvm->ops.release(hw); 3742 3743 return 0; 3744 } 3745 3746 /** 3747 * e1000_update_nvm_checksum_spt - Update the checksum for NVM 3748 * @hw: pointer to the HW structure 3749 * 3750 * The NVM checksum is updated by calling the generic update_nvm_checksum, 3751 * which writes the checksum to the shadow ram. The changes in the shadow 3752 * ram are then committed to the EEPROM by processing each bank at a time 3753 * checking for the modified bit and writing only the pending changes. 3754 * After a successful commit, the shadow ram is cleared and is ready for 3755 * future writes. 3756 **/ 3757 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) 3758 { 3759 struct e1000_nvm_info *nvm = &hw->nvm; 3760 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3761 u32 i, act_offset, new_bank_offset, old_bank_offset, bank; 3762 s32 ret_val; 3763 u32 dword = 0; 3764 3765 ret_val = e1000e_update_nvm_checksum_generic(hw); 3766 if (ret_val) 3767 goto out; 3768 3769 if (nvm->type != e1000_nvm_flash_sw) 3770 goto out; 3771 3772 nvm->ops.acquire(hw); 3773 3774 /* We're writing to the opposite bank so if we're on bank 1, 3775 * write to bank 0 etc.
We also need to erase the segment that 3776 * is going to be written 3777 */ 3778 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 3779 if (ret_val) { 3780 e_dbg("Could not detect valid bank, assuming bank 0\n"); 3781 bank = 0; 3782 } 3783 3784 if (bank == 0) { 3785 new_bank_offset = nvm->flash_bank_size; 3786 old_bank_offset = 0; 3787 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 3788 if (ret_val) 3789 goto release; 3790 } else { 3791 old_bank_offset = nvm->flash_bank_size; 3792 new_bank_offset = 0; 3793 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 3794 if (ret_val) 3795 goto release; 3796 } 3797 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { 3798 /* Determine whether to write the value stored 3799 * in the other NVM bank or a modified value stored 3800 * in the shadow RAM 3801 */ 3802 ret_val = e1000_read_flash_dword_ich8lan(hw, 3803 i + old_bank_offset, 3804 &dword); 3805 3806 if (dev_spec->shadow_ram[i].modified) { 3807 dword &= 0xffff0000; 3808 dword |= (dev_spec->shadow_ram[i].value & 0xffff); 3809 } 3810 if (dev_spec->shadow_ram[i + 1].modified) { 3811 dword &= 0x0000ffff; 3812 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) 3813 << 16); 3814 } 3815 if (ret_val) 3816 break; 3817 3818 /* If the word is 0x13, then make sure the signature bits 3819 * (15:14) are 11b until the commit has completed. 3820 * This will allow us to write 10b which indicates the 3821 * signature is valid. We want to do this after the write 3822 * has completed so that we don't mark the segment valid 3823 * while the write is still in progress 3824 */ 3825 if (i == E1000_ICH_NVM_SIG_WORD - 1) 3826 dword |= E1000_ICH_NVM_SIG_MASK << 16; 3827 3828 /* Convert offset to bytes. */ 3829 act_offset = (i + new_bank_offset) << 1; 3830 3831 usleep_range(100, 200); 3832 3833 /* Write the data to the new bank. Offset in words */ 3834 act_offset = i + new_bank_offset; 3835 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, 3836 dword); 3837 if (ret_val) 3838 break; 3839 } 3840 3841 /* Don't bother writing the segment valid bits if sector 3842 * programming failed. 3843 */ 3844 if (ret_val) { 3845 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ 3846 e_dbg("Flash commit failed.\n"); 3847 goto release; 3848 } 3849 3850 /* Finally validate the new segment by setting bit 15:14 3851 * to 10b in word 0x13 , this can be done without an 3852 * erase as well since these bits are 11 to start with 3853 * and we need to change bit 14 to 0b 3854 */ 3855 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 3856 3857 /*offset in words but we read dword */ 3858 --act_offset; 3859 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); 3860 3861 if (ret_val) 3862 goto release; 3863 3864 dword &= 0xBFFFFFFF; 3865 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); 3866 3867 if (ret_val) 3868 goto release; 3869 3870 /* And invalidate the previously valid segment by setting 3871 * its signature word (0x13) high_byte to 0b. This can be 3872 * done without an erase because flash erase sets all bits 3873 * to 1's. 
We can write 1's to 0's without an erase 3874 */ 3875 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 3876 3877 /* offset in words but we read dword */ 3878 act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; 3879 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); 3880 3881 if (ret_val) 3882 goto release; 3883 3884 dword &= 0x00FFFFFF; 3885 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); 3886 3887 if (ret_val) 3888 goto release; 3889 3890 /* Great! Everything worked, we can now clear the cached entries. */ 3891 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 3892 dev_spec->shadow_ram[i].modified = false; 3893 dev_spec->shadow_ram[i].value = 0xFFFF; 3894 } 3895 3896 release: 3897 nvm->ops.release(hw); 3898 3899 /* Reload the EEPROM, or else modifications will not appear 3900 * until after the next adapter reset. 3901 */ 3902 if (!ret_val) { 3903 nvm->ops.reload(hw); 3904 usleep_range(10000, 11000); 3905 } 3906 3907 out: 3908 if (ret_val) 3909 e_dbg("NVM update error: %d\n", ret_val); 3910 3911 return ret_val; 3912 } 3913 3914 /** 3915 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM 3916 * @hw: pointer to the HW structure 3917 * 3918 * The NVM checksum is updated by calling the generic update_nvm_checksum, 3919 * which writes the checksum to the shadow ram. The changes in the shadow 3920 * ram are then committed to the EEPROM by processing each bank at a time 3921 * checking for the modified bit and writing only the pending changes. 3922 * After a successful commit, the shadow ram is cleared and is ready for 3923 * future writes. 3924 **/ 3925 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) 3926 { 3927 struct e1000_nvm_info *nvm = &hw->nvm; 3928 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3929 u32 i, act_offset, new_bank_offset, old_bank_offset, bank; 3930 s32 ret_val; 3931 u16 data = 0; 3932 3933 ret_val = e1000e_update_nvm_checksum_generic(hw); 3934 if (ret_val) 3935 goto out; 3936 3937 if (nvm->type != e1000_nvm_flash_sw) 3938 goto out; 3939 3940 nvm->ops.acquire(hw); 3941 3942 /* We're writing to the opposite bank so if we're on bank 1, 3943 * write to bank 0 etc. We also need to erase the segment that 3944 * is going to be written 3945 */ 3946 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 3947 if (ret_val) { 3948 e_dbg("Could not detect valid bank, assuming bank 0\n"); 3949 bank = 0; 3950 } 3951 3952 if (bank == 0) { 3953 new_bank_offset = nvm->flash_bank_size; 3954 old_bank_offset = 0; 3955 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 3956 if (ret_val) 3957 goto release; 3958 } else { 3959 old_bank_offset = nvm->flash_bank_size; 3960 new_bank_offset = 0; 3961 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 3962 if (ret_val) 3963 goto release; 3964 } 3965 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 3966 if (dev_spec->shadow_ram[i].modified) { 3967 data = dev_spec->shadow_ram[i].value; 3968 } else { 3969 ret_val = e1000_read_flash_word_ich8lan(hw, i + 3970 old_bank_offset, 3971 &data); 3972 if (ret_val) 3973 break; 3974 } 3975 3976 /* If the word is 0x13, then make sure the signature bits 3977 * (15:14) are 11b until the commit has completed. 3978 * This will allow us to write 10b which indicates the 3979 * signature is valid. 
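* (Illustrative state walk, inferred from this function rather than a
* datasheet: an erased signature word reads 0xFFFF, i.e. bits 15:14 =
* 11b; the commit below rewrites the high byte so bits 15:14 become
* 10b, marking the new bank valid; invalidating the old bank then
* clears its high byte, leaving 00b.)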
We want to do this after the write
3980 * has completed so that we don't mark the segment valid
3981 * while the write is still in progress
3982 */
3983 if (i == E1000_ICH_NVM_SIG_WORD)
3984 data |= E1000_ICH_NVM_SIG_MASK;
3985
3986 /* Convert offset to bytes. */
3987 act_offset = (i + new_bank_offset) << 1;
3988
3989 usleep_range(100, 200);
3990 /* Write the bytes to the new bank. */
3991 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3992 act_offset,
3993 (u8)data);
3994 if (ret_val)
3995 break;
3996
3997 usleep_range(100, 200);
3998 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3999 act_offset + 1,
4000 (u8)(data >> 8));
4001 if (ret_val)
4002 break;
4003 }
4004
4005 /* Don't bother writing the segment valid bits if sector
4006 * programming failed.
4007 */
4008 if (ret_val) {
4009 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
4010 e_dbg("Flash commit failed.\n");
4011 goto release;
4012 }
4013
4014 /* Finally validate the new segment by setting bits 15:14
4015 * to 10b in word 0x13; this can be done without an
4016 * erase as well since these bits are 11 to start with
4017 * and we need to change bit 14 to 0b
4018 */
4019 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4020 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4021 if (ret_val)
4022 goto release;
4023
4024 data &= 0xBFFF;
4025 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4026 act_offset * 2 + 1,
4027 (u8)(data >> 8));
4028 if (ret_val)
4029 goto release;
4030
4031 /* And invalidate the previously valid segment by setting
4032 * its signature word's (0x13) high byte to 0. This can be
4033 * done without an erase because flash erase sets all bits
4034 * to 1's. We can write 1's to 0's without an erase
4035 */
4036 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4037 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4038 if (ret_val)
4039 goto release;
4040
4041 /* Great! Everything worked, we can now clear the cached entries. */
4042 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
4043 dev_spec->shadow_ram[i].modified = false;
4044 dev_spec->shadow_ram[i].value = 0xFFFF;
4045 }
4046
4047 release:
4048 nvm->ops.release(hw);
4049
4050 /* Reload the EEPROM, or else modifications will not appear
4051 * until after the next adapter reset.
4052 */
4053 if (!ret_val) {
4054 nvm->ops.reload(hw);
4055 usleep_range(10000, 11000);
4056 }
4057
4058 out:
4059 if (ret_val)
4060 e_dbg("NVM update error: %d\n", ret_val);
4061
4062 return ret_val;
4063 }
4064
4065 /**
4066 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4067 * @hw: pointer to the HW structure
4068 *
4069 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4070 * If the bit is 0, then the EEPROM had been modified, but the checksum was not
4071 * calculated, in which case we need to calculate the checksum and set bit 6.
4072 **/
4073 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4074 {
4075 s32 ret_val;
4076 u16 data;
4077 u16 word;
4078 u16 valid_csum_mask;
4079
4080 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4081 * the checksum needs to be fixed. This bit is an indication that
4082 * the NVM was prepared by OEM software and did not calculate
4083 * the checksum...a likely scenario.
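* (Sketch of the flow below, hedged to what the code itself shows: read
* the compatibility/init word; if the valid-checksum bit is clear, set
* it, write the word back through the shadow ram, then recompute and
* commit the checksum before running the generic validation.)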
4084 */
4085 switch (hw->mac.type) {
4086 case e1000_pch_lpt:
4087 case e1000_pch_spt:
4088 case e1000_pch_cnp:
4089 case e1000_pch_tgp:
4090 case e1000_pch_adp:
4091 word = NVM_COMPAT;
4092 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4093 break;
4094 default:
4095 word = NVM_FUTURE_INIT_WORD1;
4096 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4097 break;
4098 }
4099
4100 ret_val = e1000_read_nvm(hw, word, 1, &data);
4101 if (ret_val)
4102 return ret_val;
4103
4104 if (!(data & valid_csum_mask)) {
4105 data |= valid_csum_mask;
4106 ret_val = e1000_write_nvm(hw, word, 1, &data);
4107 if (ret_val)
4108 return ret_val;
4109 ret_val = e1000e_update_nvm_checksum(hw);
4110 if (ret_val)
4111 return ret_val;
4112 }
4113
4114 return e1000e_validate_nvm_checksum_generic(hw);
4115 }
4116
4117 /**
4118 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
4119 * @hw: pointer to the HW structure
4120 *
4121 * To prevent malicious write/erase of the NVM, set it to be read-only
4122 * so that the hardware ignores all write/erase cycles of the NVM via
4123 * the flash control registers. The shadow-ram copy of the NVM will
4124 * still be updated, however any updates to this copy will not stick
4125 * across driver reloads.
4126 **/
4127 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
4128 {
4129 struct e1000_nvm_info *nvm = &hw->nvm;
4130 union ich8_flash_protected_range pr0;
4131 union ich8_hws_flash_status hsfsts;
4132 u32 gfpreg;
4133
4134 nvm->ops.acquire(hw);
4135
4136 gfpreg = er32flash(ICH_FLASH_GFPREG);
4137
4138 /* Write-protect GbE Sector of NVM */
4139 pr0.regval = er32flash(ICH_FLASH_PR0);
4140 pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
4141 pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
4142 pr0.range.wpe = true;
4143 ew32flash(ICH_FLASH_PR0, pr0.regval);
4144
4145 /* Lock down a subset of GbE Flash Control Registers, e.g.
4146 * PR0 to prevent the write-protection from being lifted.
4147 * Once FLOCKDN is set, the registers protected by it cannot
4148 * be written until FLOCKDN is cleared by a hardware reset.
4149 */
4150 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4151 hsfsts.hsf_status.flockdn = true;
4152 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
4153
4154 nvm->ops.release(hw);
4155 }
4156
4157 /**
4158 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4159 * @hw: pointer to the HW structure
4160 * @offset: The offset (in bytes) of the byte/word to write.
4161 * @size: Size of data to write, 1=byte 2=word
4162 * @data: The byte(s) to write to the NVM.
4163 *
4164 * Writes one/two bytes to the NVM using the flash access registers.
4165 **/
4166 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4167 u8 size, u16 data)
4168 {
4169 union ich8_hws_flash_status hsfsts;
4170 union ich8_hws_flash_ctrl hsflctl;
4171 u32 flash_linear_addr;
4172 u32 flash_data = 0;
4173 s32 ret_val;
4174 u8 count = 0;
4175
4176 if (hw->mac.type >= e1000_pch_spt) {
4177 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4178 return -E1000_ERR_NVM;
4179 } else {
4180 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4181 return -E1000_ERR_NVM;
4182 }
4183
4184 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4185 hw->nvm.flash_base_addr);
4186
4187 do {
4188 udelay(1);
4189 /* Steps */
4190 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4191 if (ret_val)
4192 break;
4193 /* In SPT, this register is in LAN memory space, not
4194 * flash.
Therefore, only 32 bit access is supported
4195 */
4196 if (hw->mac.type >= e1000_pch_spt)
4197 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
4198 else
4199 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4200
4201 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4202 hsflctl.hsf_ctrl.fldbcount = size - 1;
4203 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4204 /* In SPT, this register is in LAN memory space,
4205 * not flash. Therefore, only 32 bit access is
4206 * supported
4207 */
4208 if (hw->mac.type >= e1000_pch_spt)
4209 ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4210 else
4211 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4212
4213 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4214
4215 if (size == 1)
4216 flash_data = (u32)data & 0x00FF;
4217 else
4218 flash_data = (u32)data;
4219
4220 ew32flash(ICH_FLASH_FDATA0, flash_data);
4221
4222 /* check if FCERR is set to 1, if set to 1, clear it
4223 * and try the whole sequence a few more times else done
4224 */
4225 ret_val =
4226 e1000_flash_cycle_ich8lan(hw,
4227 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4228 if (!ret_val)
4229 break;
4230
4231 /* If we're here, then things are most likely
4232 * completely hosed, but if the error condition
4233 * is detected, it won't hurt to give it another
4234 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4235 */
4236 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4237 if (hsfsts.hsf_status.flcerr)
4238 /* Repeat for some time before giving up. */
4239 continue;
4240 if (!hsfsts.hsf_status.flcdone) {
4241 e_dbg("Timeout error - flash cycle did not complete.\n");
4242 break;
4243 }
4244 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4245
4246 return ret_val;
4247 }
4248
4249 /**
4250 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4251 * @hw: pointer to the HW structure
4252 * @offset: The offset (in bytes) of the dword to write.
4253 * @data: The 4 bytes to write to the NVM.
4254 *
4255 * Writes a dword to the NVM using the flash access registers.
4256 **/
4257 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4258 u32 data)
4259 {
4260 union ich8_hws_flash_status hsfsts;
4261 union ich8_hws_flash_ctrl hsflctl;
4262 u32 flash_linear_addr;
4263 s32 ret_val;
4264 u8 count = 0;
4265
4266 if (hw->mac.type >= e1000_pch_spt) {
4267 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4268 return -E1000_ERR_NVM;
4269 }
4270 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4271 hw->nvm.flash_base_addr);
4272 do {
4273 udelay(1);
4274 /* Steps */
4275 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4276 if (ret_val)
4277 break;
4278
4279 /* In SPT, this register is in LAN memory space, not
4280 * flash. Therefore, only 32 bit access is supported
4281 */
4282 if (hw->mac.type >= e1000_pch_spt)
4283 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
4284 >> 16;
4285 else
4286 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4287
4288 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4289 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4290
4291 /* In SPT, this register is in LAN memory space,
4292 * not flash.
Therefore, only 32 bit access is
4293 * supported
4294 */
4295 if (hw->mac.type >= e1000_pch_spt)
4296 ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4297 else
4298 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4299
4300 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4301
4302 ew32flash(ICH_FLASH_FDATA0, data);
4303
4304 /* check if FCERR is set to 1, if set to 1, clear it
4305 * and try the whole sequence a few more times else done
4306 */
4307 ret_val =
4308 e1000_flash_cycle_ich8lan(hw,
4309 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4310
4311 if (!ret_val)
4312 break;
4313
4314 /* If we're here, then things are most likely
4315 * completely hosed, but if the error condition
4316 * is detected, it won't hurt to give it another
4317 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4318 */
4319 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4320
4321 if (hsfsts.hsf_status.flcerr)
4322 /* Repeat for some time before giving up. */
4323 continue;
4324 if (!hsfsts.hsf_status.flcdone) {
4325 e_dbg("Timeout error - flash cycle did not complete.\n");
4326 break;
4327 }
4328 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4329
4330 return ret_val;
4331 }
4332
4333 /**
4334 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4335 * @hw: pointer to the HW structure
4336 * @offset: The index of the byte to write.
4337 * @data: The byte to write to the NVM.
4338 *
4339 * Writes a single byte to the NVM using the flash access registers.
4340 **/
4341 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4342 u8 data)
4343 {
4344 u16 word = (u16)data;
4345
4346 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4347 }
4348
4349 /**
4350 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4351 * @hw: pointer to the HW structure
4352 * @offset: The offset (in words) of the dword to write.
4353 * @dword: The dword to write to the NVM.
4354 *
4355 * Writes a single dword to the NVM using the flash access registers.
4356 * Goes through a retry algorithm before giving up.
4357 **/
4358 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4359 u32 offset, u32 dword)
4360 {
4361 s32 ret_val;
4362 u16 program_retries;
4363
4364 /* Must convert word offset into bytes. */
4365 offset <<= 1;
4366 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4367
4368 if (!ret_val)
4369 return ret_val;
4370 for (program_retries = 0; program_retries < 100; program_retries++) {
4371 e_dbg("Retrying Dword %8.8X at offset %u\n", dword, offset);
4372 usleep_range(100, 200);
4373 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4374 if (!ret_val)
4375 break;
4376 }
4377 if (program_retries == 100)
4378 return -E1000_ERR_NVM;
4379
4380 return 0;
4381 }
4382
4383 /**
4384 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4385 * @hw: pointer to the HW structure
4386 * @offset: The offset of the byte to write.
4387 * @byte: The byte to write to the NVM.
4388 *
4389 * Writes a single byte to the NVM using the flash access registers.
4390 * Goes through a retry algorithm before giving up.
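* A rough bound derived from the loop below, not a datasheet figure:
* up to 100 retries with a 100-200 usec sleep between attempts, i.e.
* on the order of 10-20 ms per byte in the worst case.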
4391 **/ 4392 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 4393 u32 offset, u8 byte) 4394 { 4395 s32 ret_val; 4396 u16 program_retries; 4397 4398 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 4399 if (!ret_val) 4400 return ret_val; 4401 4402 for (program_retries = 0; program_retries < 100; program_retries++) { 4403 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); 4404 usleep_range(100, 200); 4405 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 4406 if (!ret_val) 4407 break; 4408 } 4409 if (program_retries == 100) 4410 return -E1000_ERR_NVM; 4411 4412 return 0; 4413 } 4414 4415 /** 4416 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM 4417 * @hw: pointer to the HW structure 4418 * @bank: 0 for first bank, 1 for second bank, etc. 4419 * 4420 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. 4421 * bank N is 4096 * N + flash_reg_addr. 4422 **/ 4423 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) 4424 { 4425 struct e1000_nvm_info *nvm = &hw->nvm; 4426 union ich8_hws_flash_status hsfsts; 4427 union ich8_hws_flash_ctrl hsflctl; 4428 u32 flash_linear_addr; 4429 /* bank size is in 16bit words - adjust to bytes */ 4430 u32 flash_bank_size = nvm->flash_bank_size * 2; 4431 s32 ret_val; 4432 s32 count = 0; 4433 s32 j, iteration, sector_size; 4434 4435 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 4436 4437 /* Determine HW Sector size: Read BERASE bits of hw flash status 4438 * register 4439 * 00: The Hw sector is 256 bytes, hence we need to erase 16 4440 * consecutive sectors. The start index for the nth Hw sector 4441 * can be calculated as = bank * 4096 + n * 256 4442 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 4443 * The start index for the nth Hw sector can be calculated 4444 * as = bank * 4096 4445 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 4446 * (ich9 only, otherwise error condition) 4447 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 4448 */ 4449 switch (hsfsts.hsf_status.berasesz) { 4450 case 0: 4451 /* Hw sector size 256 */ 4452 sector_size = ICH_FLASH_SEG_SIZE_256; 4453 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; 4454 break; 4455 case 1: 4456 sector_size = ICH_FLASH_SEG_SIZE_4K; 4457 iteration = 1; 4458 break; 4459 case 2: 4460 sector_size = ICH_FLASH_SEG_SIZE_8K; 4461 iteration = 1; 4462 break; 4463 case 3: 4464 sector_size = ICH_FLASH_SEG_SIZE_64K; 4465 iteration = 1; 4466 break; 4467 default: 4468 return -E1000_ERR_NVM; 4469 } 4470 4471 /* Start with the base address, then add the sector offset. */ 4472 flash_linear_addr = hw->nvm.flash_base_addr; 4473 flash_linear_addr += (bank) ? flash_bank_size : 0; 4474 4475 for (j = 0; j < iteration; j++) { 4476 do { 4477 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; 4478 4479 /* Steps */ 4480 ret_val = e1000_flash_cycle_init_ich8lan(hw); 4481 if (ret_val) 4482 return ret_val; 4483 4484 /* Write a value 11 (block Erase) in Flash 4485 * Cycle field in hw flash control 4486 */ 4487 if (hw->mac.type >= e1000_pch_spt) 4488 hsflctl.regval = 4489 er32flash(ICH_FLASH_HSFSTS) >> 16; 4490 else 4491 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 4492 4493 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 4494 if (hw->mac.type >= e1000_pch_spt) 4495 ew32flash(ICH_FLASH_HSFSTS, 4496 hsflctl.regval << 16); 4497 else 4498 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 4499 4500 /* Write the last 24 bits of an index within the 4501 * block into Flash Linear address field in Flash 4502 * Address. 
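* (Worked example, assuming a 4 KB erase granule as reported by
* BERASE = 01b: erasing bank 1 programs FADDR with
* flash_base_addr + flash_bank_size + j * 4096, where j is the
* sector iteration -- just j = 0 here, since iteration == 1.)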
4503 */
4504 ew32flash(ICH_FLASH_FADDR,
4505 flash_linear_addr + (j * sector_size));
4506
4507 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4508 if (!ret_val)
4509 break;
4510
4511 /* Check if FCERR is set to 1. If 1,
4512 * clear it and try the whole sequence
4513 * a few more times else Done
4514 */
4515 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4516 if (hsfsts.hsf_status.flcerr)
4517 /* repeat for some time before giving up */
4518 continue;
4519 else if (!hsfsts.hsf_status.flcdone)
4520 return ret_val;
4521 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4522 }
4523
4524 return 0;
4525 }
4526
4527 /**
4528 * e1000_valid_led_default_ich8lan - Set the default LED settings
4529 * @hw: pointer to the HW structure
4530 * @data: Pointer to the LED settings
4531 *
4532 * Reads the LED default settings from the NVM to data. If the NVM LED
4533 * settings are all 0's or F's, set the LED default to a valid LED default
4534 * setting.
4535 **/
4536 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4537 {
4538 s32 ret_val;
4539
4540 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
4541 if (ret_val) {
4542 e_dbg("NVM Read Error\n");
4543 return ret_val;
4544 }
4545
4546 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4547 *data = ID_LED_DEFAULT_ICH8LAN;
4548
4549 return 0;
4550 }
4551
4552 /**
4553 * e1000_id_led_init_pchlan - store LED configurations
4554 * @hw: pointer to the HW structure
4555 *
4556 * PCH does not control LEDs via the LEDCTL register, rather it uses
4557 * the PHY LED configuration register.
4558 *
4559 * PCH also does not have an "always on" or "always off" mode which
4560 * complicates the ID feature. Instead of using the "on" mode to indicate
4561 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
4562 * use "link_up" mode. The LEDs will still ID on request if there is no
4563 * link based on logic in e1000_led_[on|off]_pchlan().
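* Each PHY-driven LED has a 5-bit field in HV_LED_CONFIG (hence the
* shift = i * 5 below); ledctl_mode2 holds the image used by
* e1000_led_on_pchlan() and ledctl_mode1 the image used by
* e1000_led_off_pchlan().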
4564 **/
4565 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4566 {
4567 struct e1000_mac_info *mac = &hw->mac;
4568 s32 ret_val;
4569 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4570 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4571 u16 data, i, temp, shift;
4572
4573 /* Get default ID LED modes */
4574 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4575 if (ret_val)
4576 return ret_val;
4577
4578 mac->ledctl_default = er32(LEDCTL);
4579 mac->ledctl_mode1 = mac->ledctl_default;
4580 mac->ledctl_mode2 = mac->ledctl_default;
4581
4582 for (i = 0; i < 4; i++) {
4583 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4584 shift = (i * 5);
4585 switch (temp) {
4586 case ID_LED_ON1_DEF2:
4587 case ID_LED_ON1_ON2:
4588 case ID_LED_ON1_OFF2:
4589 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4590 mac->ledctl_mode1 |= (ledctl_on << shift);
4591 break;
4592 case ID_LED_OFF1_DEF2:
4593 case ID_LED_OFF1_ON2:
4594 case ID_LED_OFF1_OFF2:
4595 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4596 mac->ledctl_mode1 |= (ledctl_off << shift);
4597 break;
4598 default:
4599 /* Do nothing */
4600 break;
4601 }
4602 switch (temp) {
4603 case ID_LED_DEF1_ON2:
4604 case ID_LED_ON1_ON2:
4605 case ID_LED_OFF1_ON2:
4606 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4607 mac->ledctl_mode2 |= (ledctl_on << shift);
4608 break;
4609 case ID_LED_DEF1_OFF2:
4610 case ID_LED_ON1_OFF2:
4611 case ID_LED_OFF1_OFF2:
4612 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4613 mac->ledctl_mode2 |= (ledctl_off << shift);
4614 break;
4615 default:
4616 /* Do nothing */
4617 break;
4618 }
4619 }
4620
4621 return 0;
4622 }
4623
4624 /**
4625 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4626 * @hw: pointer to the HW structure
4627 *
4628 * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4629 * register, so the bus width is hard-coded.
4630 **/
4631 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4632 {
4633 struct e1000_bus_info *bus = &hw->bus;
4634 s32 ret_val;
4635
4636 ret_val = e1000e_get_bus_info_pcie(hw);
4637
4638 /* ICH devices are "PCI Express"-ish. They have
4639 * a configuration space, but do not contain
4640 * PCI Express Capability registers, so bus width
4641 * must be hardcoded.
4642 */
4643 if (bus->width == e1000_bus_width_unknown)
4644 bus->width = e1000_bus_width_pcie_x1;
4645
4646 return ret_val;
4647 }
4648
4649 /**
4650 * e1000_reset_hw_ich8lan - Reset the hardware
4651 * @hw: pointer to the HW structure
4652 *
4653 * Does a full reset of the hardware which includes a reset of the PHY and
4654 * MAC.
4655 **/
4656 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4657 {
4658 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4659 u16 kum_cfg;
4660 u32 ctrl, reg;
4661 s32 ret_val;
4662
4663 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4664 * on the last TLP read/write transaction when MAC is reset.
4665 */
4666 ret_val = e1000e_disable_pcie_master(hw);
4667 if (ret_val)
4668 e_dbg("PCI-E Master disable polling has failed.\n");
4669
4670 e_dbg("Masking off all interrupts\n");
4671 ew32(IMC, 0xffffffff);
4672
4673 /* Disable the Transmit and Receive units. Then delay to allow
4674 * any pending transactions to complete before we hit the MAC
4675 * with the global reset.
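* (Reset sequence sketch, as implemented below: clear RCTL, set
* TCTL.PSP, flush, sleep ~10 ms, then assert CTRL.RST -- together
* with CTRL.PHY_RST when the PHY is not reset-blocked, so the MAC
* and the external PHY reset as one.)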
4676 */ 4677 ew32(RCTL, 0); 4678 ew32(TCTL, E1000_TCTL_PSP); 4679 e1e_flush(); 4680 4681 usleep_range(10000, 11000); 4682 4683 /* Workaround for ICH8 bit corruption issue in FIFO memory */ 4684 if (hw->mac.type == e1000_ich8lan) { 4685 /* Set Tx and Rx buffer allocation to 8k apiece. */ 4686 ew32(PBA, E1000_PBA_8K); 4687 /* Set Packet Buffer Size to 16k. */ 4688 ew32(PBS, E1000_PBS_16K); 4689 } 4690 4691 if (hw->mac.type == e1000_pchlan) { 4692 /* Save the NVM K1 bit setting */ 4693 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); 4694 if (ret_val) 4695 return ret_val; 4696 4697 if (kum_cfg & E1000_NVM_K1_ENABLE) 4698 dev_spec->nvm_k1_enabled = true; 4699 else 4700 dev_spec->nvm_k1_enabled = false; 4701 } 4702 4703 ctrl = er32(CTRL); 4704 4705 if (!hw->phy.ops.check_reset_block(hw)) { 4706 /* Full-chip reset requires MAC and PHY reset at the same 4707 * time to make sure the interface between MAC and the 4708 * external PHY is reset. 4709 */ 4710 ctrl |= E1000_CTRL_PHY_RST; 4711 4712 /* Gate automatic PHY configuration by hardware on 4713 * non-managed 82579 4714 */ 4715 if ((hw->mac.type == e1000_pch2lan) && 4716 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 4717 e1000_gate_hw_phy_config_ich8lan(hw, true); 4718 } 4719 ret_val = e1000_acquire_swflag_ich8lan(hw); 4720 e_dbg("Issuing a global reset to ich8lan\n"); 4721 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 4722 /* cannot issue a flush here because it hangs the hardware */ 4723 msleep(20); 4724 4725 /* Set Phy Config Counter to 50msec */ 4726 if (hw->mac.type == e1000_pch2lan) { 4727 reg = er32(FEXTNVM3); 4728 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; 4729 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; 4730 ew32(FEXTNVM3, reg); 4731 } 4732 4733 if (!ret_val) 4734 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); 4735 4736 if (ctrl & E1000_CTRL_PHY_RST) { 4737 ret_val = hw->phy.ops.get_cfg_done(hw); 4738 if (ret_val) 4739 return ret_val; 4740 4741 ret_val = e1000_post_phy_reset_ich8lan(hw); 4742 if (ret_val) 4743 return ret_val; 4744 } 4745 4746 /* For PCH, this write will make sure that any noise 4747 * will be detected as a CRC error and be dropped rather than show up 4748 * as a bad packet to the DMA engine. 4749 */ 4750 if (hw->mac.type == e1000_pchlan) 4751 ew32(CRC_OFFSET, 0x65656565); 4752 4753 ew32(IMC, 0xffffffff); 4754 er32(ICR); 4755 4756 reg = er32(KABGTXD); 4757 reg |= E1000_KABGTXD_BGSQLBIAS; 4758 ew32(KABGTXD, reg); 4759 4760 return 0; 4761 } 4762 4763 /** 4764 * e1000_init_hw_ich8lan - Initialize the hardware 4765 * @hw: pointer to the HW structure 4766 * 4767 * Prepares the hardware for transmit and receive by doing the following: 4768 * - initialize hardware bits 4769 * - initialize LED identification 4770 * - setup receive address registers 4771 * - setup flow control 4772 * - setup transmit descriptors 4773 * - clear statistics 4774 **/ 4775 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) 4776 { 4777 struct e1000_mac_info *mac = &hw->mac; 4778 u32 ctrl_ext, txdctl, snoop; 4779 s32 ret_val; 4780 u16 i; 4781 4782 e1000_initialize_hw_bits_ich8lan(hw); 4783 4784 /* Initialize identification LED */ 4785 ret_val = mac->ops.id_led_init(hw); 4786 /* An error is not fatal and we should not stop init due to this */ 4787 if (ret_val) 4788 e_dbg("Error initializing identification LED\n"); 4789 4790 /* Setup the receive address. 
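* (A reading of the generic helper, not of this file:
* e1000e_init_rx_addrs() programs the current MAC address into RAR[0]
* and clears the remaining rar_entry_count entries.)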
*/ 4791 e1000e_init_rx_addrs(hw, mac->rar_entry_count); 4792 4793 /* Zero out the Multicast HASH table */ 4794 e_dbg("Zeroing the MTA\n"); 4795 for (i = 0; i < mac->mta_reg_count; i++) 4796 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 4797 4798 /* The 82578 Rx buffer will stall if wakeup is enabled in host and 4799 * the ME. Disable wakeup by clearing the host wakeup bit. 4800 * Reset the phy after disabling host wakeup to reset the Rx buffer. 4801 */ 4802 if (hw->phy.type == e1000_phy_82578) { 4803 e1e_rphy(hw, BM_PORT_GEN_CFG, &i); 4804 i &= ~BM_WUC_HOST_WU_BIT; 4805 e1e_wphy(hw, BM_PORT_GEN_CFG, i); 4806 ret_val = e1000_phy_hw_reset_ich8lan(hw); 4807 if (ret_val) 4808 return ret_val; 4809 } 4810 4811 /* Setup link and flow control */ 4812 ret_val = mac->ops.setup_link(hw); 4813 4814 /* Set the transmit descriptor write-back policy for both queues */ 4815 txdctl = er32(TXDCTL(0)); 4816 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | 4817 E1000_TXDCTL_FULL_TX_DESC_WB); 4818 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | 4819 E1000_TXDCTL_MAX_TX_DESC_PREFETCH); 4820 ew32(TXDCTL(0), txdctl); 4821 txdctl = er32(TXDCTL(1)); 4822 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | 4823 E1000_TXDCTL_FULL_TX_DESC_WB); 4824 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | 4825 E1000_TXDCTL_MAX_TX_DESC_PREFETCH); 4826 ew32(TXDCTL(1), txdctl); 4827 4828 /* ICH8 has opposite polarity of no_snoop bits. 4829 * By default, we should use snoop behavior. 4830 */ 4831 if (mac->type == e1000_ich8lan) 4832 snoop = PCIE_ICH8_SNOOP_ALL; 4833 else 4834 snoop = (u32)~(PCIE_NO_SNOOP_ALL); 4835 e1000e_set_pcie_no_snoop(hw, snoop); 4836 4837 ctrl_ext = er32(CTRL_EXT); 4838 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 4839 ew32(CTRL_EXT, ctrl_ext); 4840 4841 /* Clear all of the statistics registers (clear on read). It is 4842 * important that we do this after we have tried to establish link 4843 * because the symbol error count will increment wildly if there 4844 * is no link. 4845 */ 4846 e1000_clear_hw_cntrs_ich8lan(hw); 4847 4848 return ret_val; 4849 } 4850 4851 /** 4852 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits 4853 * @hw: pointer to the HW structure 4854 * 4855 * Sets/Clears required hardware bits necessary for correctly setting up the 4856 * hardware for transmit and receive. 
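* Notable pieces below: several reserved/undocumented bits (e.g.
* BIT(22) in CTRL_EXT and TXDCTL), presumably carried over from the
* reference code; the RFCTL NFS-filter disable working around NFSv2/UDP
* descriptor corruption; and ECC enabling via PBECCSTS and CTRL.MEHE on
* Lynxpoint and later parts.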
4857 **/ 4858 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) 4859 { 4860 u32 reg; 4861 4862 /* Extended Device Control */ 4863 reg = er32(CTRL_EXT); 4864 reg |= BIT(22); 4865 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 4866 if (hw->mac.type >= e1000_pchlan) 4867 reg |= E1000_CTRL_EXT_PHYPDEN; 4868 ew32(CTRL_EXT, reg); 4869 4870 /* Transmit Descriptor Control 0 */ 4871 reg = er32(TXDCTL(0)); 4872 reg |= BIT(22); 4873 ew32(TXDCTL(0), reg); 4874 4875 /* Transmit Descriptor Control 1 */ 4876 reg = er32(TXDCTL(1)); 4877 reg |= BIT(22); 4878 ew32(TXDCTL(1), reg); 4879 4880 /* Transmit Arbitration Control 0 */ 4881 reg = er32(TARC(0)); 4882 if (hw->mac.type == e1000_ich8lan) 4883 reg |= BIT(28) | BIT(29); 4884 reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27); 4885 ew32(TARC(0), reg); 4886 4887 /* Transmit Arbitration Control 1 */ 4888 reg = er32(TARC(1)); 4889 if (er32(TCTL) & E1000_TCTL_MULR) 4890 reg &= ~BIT(28); 4891 else 4892 reg |= BIT(28); 4893 reg |= BIT(24) | BIT(26) | BIT(30); 4894 ew32(TARC(1), reg); 4895 4896 /* Device Status */ 4897 if (hw->mac.type == e1000_ich8lan) { 4898 reg = er32(STATUS); 4899 reg &= ~BIT(31); 4900 ew32(STATUS, reg); 4901 } 4902 4903 /* work-around descriptor data corruption issue during nfs v2 udp 4904 * traffic, just disable the nfs filtering capability 4905 */ 4906 reg = er32(RFCTL); 4907 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 4908 4909 /* Disable IPv6 extension header parsing because some malformed 4910 * IPv6 headers can hang the Rx. 4911 */ 4912 if (hw->mac.type == e1000_ich8lan) 4913 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); 4914 ew32(RFCTL, reg); 4915 4916 /* Enable ECC on Lynxpoint */ 4917 if (hw->mac.type >= e1000_pch_lpt) { 4918 reg = er32(PBECCSTS); 4919 reg |= E1000_PBECCSTS_ECC_ENABLE; 4920 ew32(PBECCSTS, reg); 4921 4922 reg = er32(CTRL); 4923 reg |= E1000_CTRL_MEHE; 4924 ew32(CTRL, reg); 4925 } 4926 } 4927 4928 /** 4929 * e1000_setup_link_ich8lan - Setup flow control and link settings 4930 * @hw: pointer to the HW structure 4931 * 4932 * Determines which flow control settings to use, then configures flow 4933 * control. Calls the appropriate media-specific link configuration 4934 * function. Assuming the adapter has a valid link partner, a valid link 4935 * should be established. Assumes the hardware has previously been reset 4936 * and the transmitter and receiver are not enabled. 4937 **/ 4938 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) 4939 { 4940 s32 ret_val; 4941 4942 if (hw->phy.ops.check_reset_block(hw)) 4943 return 0; 4944 4945 /* ICH parts do not have a word in the NVM to determine 4946 * the default flow control setting, so we explicitly 4947 * set it to full. 4948 */ 4949 if (hw->fc.requested_mode == e1000_fc_default) { 4950 /* Workaround h/w hang when Tx flow control enabled */ 4951 if (hw->mac.type == e1000_pchlan) 4952 hw->fc.requested_mode = e1000_fc_rx_pause; 4953 else 4954 hw->fc.requested_mode = e1000_fc_full; 4955 } 4956 4957 /* Save off the requested flow control mode for use later. Depending 4958 * on the link partner's capabilities, we may or may not use this mode. 4959 */ 4960 hw->fc.current_mode = hw->fc.requested_mode; 4961 4962 e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); 4963 4964 /* Continue to configure the copper link. 
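* (On 82577/82578/82579/i217 PHYs the pause time is also mirrored into
* a PHY register -- BM_PORT_CTRL_PAGE register 27 below -- in addition
* to the MAC's FCTTV/FCRTV_PCH programming.)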
*/
4965 ret_val = hw->mac.ops.setup_physical_interface(hw);
4966 if (ret_val)
4967 return ret_val;
4968
4969 ew32(FCTTV, hw->fc.pause_time);
4970 if ((hw->phy.type == e1000_phy_82578) ||
4971 (hw->phy.type == e1000_phy_82579) ||
4972 (hw->phy.type == e1000_phy_i217) ||
4973 (hw->phy.type == e1000_phy_82577)) {
4974 ew32(FCRTV_PCH, hw->fc.refresh_time);
4975
4976 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
4977 hw->fc.pause_time);
4978 if (ret_val)
4979 return ret_val;
4980 }
4981
4982 return e1000e_set_fc_watermarks(hw);
4983 }
4984
4985 /**
4986 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4987 * @hw: pointer to the HW structure
4988 *
4989 * Configures the kumeran interface to the PHY to wait the appropriate time
4990 * when polling the PHY, then call the generic setup_copper_link to finish
4991 * configuring the copper link.
4992 **/
4993 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4994 {
4995 u32 ctrl;
4996 s32 ret_val;
4997 u16 reg_data;
4998
4999 ctrl = er32(CTRL);
5000 ctrl |= E1000_CTRL_SLU;
5001 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5002 ew32(CTRL, ctrl);
5003
5004 /* Set the mac to wait the maximum time between each iteration
5005 * and increase the max iterations when polling the phy;
5006 * this fixes erroneous timeouts at 10Mbps.
5007 */
5008 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
5009 if (ret_val)
5010 return ret_val;
5011 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5012 &reg_data);
5013 if (ret_val)
5014 return ret_val;
5015 reg_data |= 0x3F;
5016 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5017 reg_data);
5018 if (ret_val)
5019 return ret_val;
5020
5021 switch (hw->phy.type) {
5022 case e1000_phy_igp_3:
5023 ret_val = e1000e_copper_link_setup_igp(hw);
5024 if (ret_val)
5025 return ret_val;
5026 break;
5027 case e1000_phy_bm:
5028 case e1000_phy_82578:
5029 ret_val = e1000e_copper_link_setup_m88(hw);
5030 if (ret_val)
5031 return ret_val;
5032 break;
5033 case e1000_phy_82577:
5034 case e1000_phy_82579:
5035 ret_val = e1000_copper_link_setup_82577(hw);
5036 if (ret_val)
5037 return ret_val;
5038 break;
5039 case e1000_phy_ife:
5040 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
5041 if (ret_val)
5042 return ret_val;
5043
5044 reg_data &= ~IFE_PMC_AUTO_MDIX;
5045
5046 switch (hw->phy.mdix) {
5047 case 1:
5048 reg_data &= ~IFE_PMC_FORCE_MDIX;
5049 break;
5050 case 2:
5051 reg_data |= IFE_PMC_FORCE_MDIX;
5052 break;
5053 case 0:
5054 default:
5055 reg_data |= IFE_PMC_AUTO_MDIX;
5056 break;
5057 }
5058 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
5059 if (ret_val)
5060 return ret_val;
5061 break;
5062 default:
5063 break;
5064 }
5065
5066 return e1000e_setup_copper_link(hw);
5067 }
5068
5069 /**
5070 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5071 * @hw: pointer to the HW structure
5072 *
5073 * Calls the PHY specific link setup function and then calls the
5074 * generic setup_copper_link to finish configuring the link for
5075 * Lynxpoint PCH devices
5076 **/
5077 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5078 {
5079 u32 ctrl;
5080 s32 ret_val;
5081
5082 ctrl = er32(CTRL);
5083 ctrl |= E1000_CTRL_SLU;
5084 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5085 ew32(CTRL, ctrl);
5086
5087 ret_val = e1000_copper_link_setup_82577(hw);
5088 if (ret_val)
5089 return ret_val;
5090
5091 return e1000e_setup_copper_link(hw);
5092 }
5093
5094 /**
5095 * e1000_get_link_up_info_ich8lan - Get
current link speed and duplex 5096 * @hw: pointer to the HW structure 5097 * @speed: pointer to store current link speed 5098 * @duplex: pointer to store the current link duplex 5099 * 5100 * Calls the generic get_speed_and_duplex to retrieve the current link 5101 * information and then calls the Kumeran lock loss workaround for links at 5102 * gigabit speeds. 5103 **/ 5104 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, 5105 u16 *duplex) 5106 { 5107 s32 ret_val; 5108 5109 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); 5110 if (ret_val) 5111 return ret_val; 5112 5113 if ((hw->mac.type == e1000_ich8lan) && 5114 (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { 5115 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); 5116 } 5117 5118 return ret_val; 5119 } 5120 5121 /** 5122 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround 5123 * @hw: pointer to the HW structure 5124 * 5125 * Work-around for 82566 Kumeran PCS lock loss: 5126 * On link status change (i.e. PCI reset, speed change) and link is up and 5127 * speed is gigabit- 5128 * 0) if workaround is optionally disabled do nothing 5129 * 1) wait 1ms for Kumeran link to come up 5130 * 2) check Kumeran Diagnostic register PCS lock loss bit 5131 * 3) if not set the link is locked (all is good), otherwise... 5132 * 4) reset the PHY 5133 * 5) repeat up to 10 times 5134 * Note: this is only called for IGP3 copper when speed is 1gb. 5135 **/ 5136 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) 5137 { 5138 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 5139 u32 phy_ctrl; 5140 s32 ret_val; 5141 u16 i, data; 5142 bool link; 5143 5144 if (!dev_spec->kmrn_lock_loss_workaround_enabled) 5145 return 0; 5146 5147 /* Make sure link is up before proceeding. If not just return. 5148 * Attempting this while link is negotiating fouled up link 5149 * stability 5150 */ 5151 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 5152 if (!link) 5153 return 0; 5154 5155 for (i = 0; i < 10; i++) { 5156 /* read once to clear */ 5157 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); 5158 if (ret_val) 5159 return ret_val; 5160 /* and again to get new status */ 5161 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); 5162 if (ret_val) 5163 return ret_val; 5164 5165 /* check for PCS lock */ 5166 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) 5167 return 0; 5168 5169 /* Issue PHY reset */ 5170 e1000_phy_hw_reset(hw); 5171 mdelay(5); 5172 } 5173 /* Disable GigE link negotiation */ 5174 phy_ctrl = er32(PHY_CTRL); 5175 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | 5176 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 5177 ew32(PHY_CTRL, phy_ctrl); 5178 5179 /* Call gig speed drop workaround on Gig disable before accessing 5180 * any PHY registers 5181 */ 5182 e1000e_gig_downshift_workaround_ich8lan(hw); 5183 5184 /* unable to acquire PCS lock */ 5185 return -E1000_ERR_PHY; 5186 } 5187 5188 /** 5189 * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state 5190 * @hw: pointer to the HW structure 5191 * @state: boolean value used to set the current Kumeran workaround state 5192 * 5193 * If ICH8, set the current Kumeran workaround state (enabled - true 5194 * /disabled - false). 
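* For non-ICH8 parts this is a no-op apart from the debug message; the
* flag itself is only consulted by
* e1000_kmrn_lock_loss_workaround_ich8lan() above.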
5195 **/
5196 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5197 bool state)
5198 {
5199 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5200
5201 if (hw->mac.type != e1000_ich8lan) {
5202 e_dbg("Workaround applies to ICH8 only.\n");
5203 return;
5204 }
5205
5206 dev_spec->kmrn_lock_loss_workaround_enabled = state;
5207 }
5208
5209 /**
5210 * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5211 * @hw: pointer to the HW structure
5212 *
5213 * Workaround for 82566 power-down on D3 entry:
5214 * 1) disable gigabit link
5215 * 2) write VR power-down enable
5216 * 3) read it back
5217 * Continue if successful, else issue LCD reset and repeat
5218 **/
5219 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5220 {
5221 u32 reg;
5222 u16 data;
5223 u8 retry = 0;
5224
5225 if (hw->phy.type != e1000_phy_igp_3)
5226 return;
5227
5228 /* Try the workaround twice (if needed) */
5229 do {
5230 /* Disable link */
5231 reg = er32(PHY_CTRL);
5232 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5233 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5234 ew32(PHY_CTRL, reg);
5235
5236 /* Call gig speed drop workaround on Gig disable before
5237 * accessing any PHY registers
5238 */
5239 if (hw->mac.type == e1000_ich8lan)
5240 e1000e_gig_downshift_workaround_ich8lan(hw);
5241
5242 /* Write VR power-down enable */
5243 e1e_rphy(hw, IGP3_VR_CTRL, &data);
5244 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5245 e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5246
5247 /* Read it back and test */
5248 e1e_rphy(hw, IGP3_VR_CTRL, &data);
5249 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5250 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5251 break;
5252
5253 /* Issue PHY reset and repeat at most one more time */
5254 reg = er32(CTRL);
5255 ew32(CTRL, reg | E1000_CTRL_PHY_RST);
5256 retry++;
5257 } while (retry);
5258 }
5259
5260 /**
5261 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5262 * @hw: pointer to the HW structure
5263 *
5264 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5265 * LPLU, Gig disable, MDIC PHY reset):
5266 * 1) Set Kumeran Near-end loopback
5267 * 2) Clear Kumeran Near-end loopback
5268 * Should only be called for ICH8[m] devices with any 1G Phy.
5269 **/
5270 void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5271 {
5272 s32 ret_val;
5273 u16 reg_data;
5274
5275 if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
5276 return;
5277
5278 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5279 &reg_data);
5280 if (ret_val)
5281 return;
5282 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5283 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5284 reg_data);
5285 if (ret_val)
5286 return;
5287 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5288 e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
5289 }
5290
5291 /**
5292 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5293 * @hw: pointer to the HW structure
5294 *
5295 * During S0 to Sx transition, it is possible the link remains at gig
5296 * instead of negotiating to a lower speed. Before going to Sx, set
5297 * 'Gig Disable' to force link speed negotiation to a lower speed based on
5298 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
5299 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5300 * needs to be written.
5301 * Parts that support (and are linked to a partner which support) EEE in 5302 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power 5303 * than 10Mbps w/o EEE. 5304 **/ 5305 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) 5306 { 5307 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 5308 u32 phy_ctrl; 5309 s32 ret_val; 5310 5311 phy_ctrl = er32(PHY_CTRL); 5312 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; 5313 5314 if (hw->phy.type == e1000_phy_i217) { 5315 u16 phy_reg, device_id = hw->adapter->pdev->device; 5316 5317 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || 5318 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || 5319 (device_id == E1000_DEV_ID_PCH_I218_LM3) || 5320 (device_id == E1000_DEV_ID_PCH_I218_V3) || 5321 (hw->mac.type >= e1000_pch_spt)) { 5322 u32 fextnvm6 = er32(FEXTNVM6); 5323 5324 ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); 5325 } 5326 5327 ret_val = hw->phy.ops.acquire(hw); 5328 if (ret_val) 5329 goto out; 5330 5331 if (!dev_spec->eee_disable) { 5332 u16 eee_advert; 5333 5334 ret_val = 5335 e1000_read_emi_reg_locked(hw, 5336 I217_EEE_ADVERTISEMENT, 5337 &eee_advert); 5338 if (ret_val) 5339 goto release; 5340 5341 /* Disable LPLU if both link partners support 100BaseT 5342 * EEE and 100Full is advertised on both ends of the 5343 * link, and enable Auto Enable LPI since there will 5344 * be no driver to enable LPI while in Sx. 5345 */ 5346 if ((eee_advert & I82579_EEE_100_SUPPORTED) && 5347 (dev_spec->eee_lp_ability & 5348 I82579_EEE_100_SUPPORTED) && 5349 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { 5350 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | 5351 E1000_PHY_CTRL_NOND0A_LPLU); 5352 5353 /* Set Auto Enable LPI after link up */ 5354 e1e_rphy_locked(hw, 5355 I217_LPI_GPIO_CTRL, &phy_reg); 5356 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; 5357 e1e_wphy_locked(hw, 5358 I217_LPI_GPIO_CTRL, phy_reg); 5359 } 5360 } 5361 5362 /* For i217 Intel Rapid Start Technology support, 5363 * when the system is going into Sx and no manageability engine 5364 * is present, the driver must configure proxy to reset only on 5365 * power good. LPI (Low Power Idle) state must also reset only 5366 * on power good, as well as the MTA (Multicast table array). 5367 * The SMBus release must also be disabled on LCD reset. 5368 */ 5369 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 5370 /* Enable proxy to reset only on power good. */ 5371 e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); 5372 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; 5373 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); 5374 5375 /* Set bit enable LPI (EEE) to reset only on 5376 * power good. 5377 */ 5378 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); 5379 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; 5380 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); 5381 5382 /* Disable the SMB release on LCD reset. 
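* (The I217_MEMPWR bit cleared just below, together with the
* PROXY_CTRL and SxCTRL writes above, forms the Intel Rapid Start
* setup that is only performed when no manageability firmware owns
* the PHY.)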
*/ 5383 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 5384 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; 5385 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 5386 } 5387 5388 /* Enable MTA to reset for Intel Rapid Start Technology 5389 * Support 5390 */ 5391 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 5392 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; 5393 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 5394 5395 release: 5396 hw->phy.ops.release(hw); 5397 } 5398 out: 5399 ew32(PHY_CTRL, phy_ctrl); 5400 5401 if (hw->mac.type == e1000_ich8lan) 5402 e1000e_gig_downshift_workaround_ich8lan(hw); 5403 5404 if (hw->mac.type >= e1000_pchlan) { 5405 e1000_oem_bits_config_ich8lan(hw, false); 5406 5407 /* Reset PHY to activate OEM bits on 82577/8 */ 5408 if (hw->mac.type == e1000_pchlan) 5409 e1000e_phy_hw_reset_generic(hw); 5410 5411 ret_val = hw->phy.ops.acquire(hw); 5412 if (ret_val) 5413 return; 5414 e1000_write_smbus_addr(hw); 5415 hw->phy.ops.release(hw); 5416 } 5417 } 5418 5419 /** 5420 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 5421 * @hw: pointer to the HW structure 5422 * 5423 * During Sx to S0 transitions on non-managed devices or managed devices 5424 * on which PHY resets are not blocked, if the PHY registers cannot be 5425 * accessed properly by the s/w toggle the LANPHYPC value to power cycle 5426 * the PHY. 5427 * On i217, setup Intel Rapid Start Technology. 5428 **/ 5429 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) 5430 { 5431 s32 ret_val; 5432 5433 if (hw->mac.type < e1000_pch2lan) 5434 return; 5435 5436 ret_val = e1000_init_phy_workarounds_pchlan(hw); 5437 if (ret_val) { 5438 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); 5439 return; 5440 } 5441 5442 /* For i217 Intel Rapid Start Technology support when the system 5443 * is transitioning from Sx and no manageability engine is present 5444 * configure SMBus to restore on reset, disable proxy, and enable 5445 * the reset on MTA (Multicast table array). 5446 */ 5447 if (hw->phy.type == e1000_phy_i217) { 5448 u16 phy_reg; 5449 5450 ret_val = hw->phy.ops.acquire(hw); 5451 if (ret_val) { 5452 e_dbg("Failed to setup iRST\n"); 5453 return; 5454 } 5455 5456 /* Clear Auto Enable LPI after link up */ 5457 e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); 5458 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; 5459 e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); 5460 5461 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 5462 /* Restore clear on SMB if no manageability engine 5463 * is present 5464 */ 5465 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 5466 if (ret_val) 5467 goto release; 5468 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; 5469 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 5470 5471 /* Disable Proxy */ 5472 e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); 5473 } 5474 /* Enable reset on MTA */ 5475 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 5476 if (ret_val) 5477 goto release; 5478 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; 5479 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 5480 release: 5481 if (ret_val) 5482 e_dbg("Error %d in resume workarounds\n", ret_val); 5483 hw->phy.ops.release(hw); 5484 } 5485 } 5486 5487 /** 5488 * e1000_cleanup_led_ich8lan - Restore the default LED operation 5489 * @hw: pointer to the HW structure 5490 * 5491 * Return the LED back to the default configuration. 
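* For IFE (10/100) PHYs this clears the PHY's special-control LED
* register; for all other PHY types it restores the LEDCTL value saved
* in mac->ledctl_default.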
5492 **/ 5493 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) 5494 { 5495 if (hw->phy.type == e1000_phy_ife) 5496 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); 5497 5498 ew32(LEDCTL, hw->mac.ledctl_default); 5499 return 0; 5500 } 5501 5502 /** 5503 * e1000_led_on_ich8lan - Turn LEDs on 5504 * @hw: pointer to the HW structure 5505 * 5506 * Turn on the LEDs. 5507 **/ 5508 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) 5509 { 5510 if (hw->phy.type == e1000_phy_ife) 5511 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 5512 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); 5513 5514 ew32(LEDCTL, hw->mac.ledctl_mode2); 5515 return 0; 5516 } 5517 5518 /** 5519 * e1000_led_off_ich8lan - Turn LEDs off 5520 * @hw: pointer to the HW structure 5521 * 5522 * Turn off the LEDs. 5523 **/ 5524 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) 5525 { 5526 if (hw->phy.type == e1000_phy_ife) 5527 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 5528 (IFE_PSCL_PROBE_MODE | 5529 IFE_PSCL_PROBE_LEDS_OFF)); 5530 5531 ew32(LEDCTL, hw->mac.ledctl_mode1); 5532 return 0; 5533 } 5534 5535 /** 5536 * e1000_setup_led_pchlan - Configures SW controllable LED 5537 * @hw: pointer to the HW structure 5538 * 5539 * This prepares the SW controllable LED for use. 5540 **/ 5541 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) 5542 { 5543 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); 5544 } 5545 5546 /** 5547 * e1000_cleanup_led_pchlan - Restore the default LED operation 5548 * @hw: pointer to the HW structure 5549 * 5550 * Return the LED back to the default configuration. 5551 **/ 5552 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) 5553 { 5554 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); 5555 } 5556 5557 /** 5558 * e1000_led_on_pchlan - Turn LEDs on 5559 * @hw: pointer to the HW structure 5560 * 5561 * Turn on the LEDs. 5562 **/ 5563 static s32 e1000_led_on_pchlan(struct e1000_hw *hw) 5564 { 5565 u16 data = (u16)hw->mac.ledctl_mode2; 5566 u32 i, led; 5567 5568 /* If no link, then turn LED on by setting the invert bit 5569 * for each LED that's mode is "link_up" in ledctl_mode2. 5570 */ 5571 if (!(er32(STATUS) & E1000_STATUS_LU)) { 5572 for (i = 0; i < 3; i++) { 5573 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; 5574 if ((led & E1000_PHY_LED0_MODE_MASK) != 5575 E1000_LEDCTL_MODE_LINK_UP) 5576 continue; 5577 if (led & E1000_PHY_LED0_IVRT) 5578 data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); 5579 else 5580 data |= (E1000_PHY_LED0_IVRT << (i * 5)); 5581 } 5582 } 5583 5584 return e1e_wphy(hw, HV_LED_CONFIG, data); 5585 } 5586 5587 /** 5588 * e1000_led_off_pchlan - Turn LEDs off 5589 * @hw: pointer to the HW structure 5590 * 5591 * Turn off the LEDs. 5592 **/ 5593 static s32 e1000_led_off_pchlan(struct e1000_hw *hw) 5594 { 5595 u16 data = (u16)hw->mac.ledctl_mode1; 5596 u32 i, led; 5597 5598 /* If no link, then turn LED off by clearing the invert bit 5599 * for each LED that's mode is "link_up" in ledctl_mode1. 
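* (The same invert-bit manipulation as e1000_led_on_pchlan(), just
* applied to the ledctl_mode1 image instead.)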
/**
 * e1000_led_off_pchlan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	/* If no link, then turn LED off by clearing the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode1.
	 */
	if (!(er32(STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return e1e_wphy(hw, HV_LED_CONFIG, data);
}

/**
 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 * @hw: pointer to the HW structure
 *
 * Read appropriate register for the config done bit for completion status
 * and configure the PHY through s/w for EEPROM-less parts.
 *
 * NOTE: some silicon which is EEPROM-less will fail trying to read the
 * config done bit, so only an error is logged and the routine continues.
 * If we were to return with an error, EEPROM-less silicon would not be
 * able to be reset or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 bank = 0;
	u32 status;

	e1000e_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000e_get_auto_rd_done(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			e_dbg("Auto Read Done did not complete\n");
			ret_val = 0;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = er32(STATUS);
	if (status & E1000_STATUS_PHYRA)
		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
	else
		e_dbg("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(er32(EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000e_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			e_dbg("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, to turn off link during a
 * driver unload, or when wake on LAN is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}
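
/* Illustrative note (not upstream code): by De Morgan's law the negated-OR
 * test above reads as "power down only when manageability mode is off AND
 * PHY resets are not blocked".  A hypothetical equivalent spelling of the
 * same predicate:
 */
static inline bool e1000_may_power_down_phy_sketch(struct e1000_hw *hw)
{
	return !hw->mac.ops.check_mng_mode(hw) &&
	       !hw->phy.ops.check_reset_block(hw);
}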
/**
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	e1000e_clear_hw_cntrs_base(hw);

	er32(ALGNERRC);
	er32(RXERRC);
	er32(TNCRS);
	er32(CEXTERR);
	er32(TSCTC);
	er32(TSCTFC);

	er32(MGTPRC);
	er32(MGTPDC);
	er32(MGTPTC);

	er32(IAC);
	er32(ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}

static const struct e1000_mac_operations ich8_mac_ops = {
	/* check_mng_mode dependent on mac type */
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.set_lan_id		= e1000_set_lan_id_single_port,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
	/* id_led_init dependent on mac type */
	.config_collision_dist	= e1000e_config_collision_dist_generic,
	.rar_set		= e1000e_rar_set_generic,
	.rar_get_count		= e1000e_rar_get_count_generic,
};

static const struct e1000_phy_operations ich8_phy_ops = {
	.acquire		= e1000_acquire_swflag_ich8lan,
	.check_reset_block	= e1000_check_reset_block_ich8lan,
	.commit			= NULL,
	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_release_swflag_ich8lan,
	.reset			= e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
	.write_reg		= e1000e_write_phy_reg_igp,
};
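
/* Illustrative note (not upstream code): the core driver does not call the
 * ich8lan functions directly; these const ops tables are copied into the
 * per-adapter struct e1000_hw and every access dispatches through the
 * pointers.  A hypothetical caller reading a PHY register, which for this
 * family resolves to e1000e_read_phy_reg_igp():
 */
static s32 e1000_read_phy_id_sketch(struct e1000_hw *hw, u16 *id)
{
	/* MII_PHYSID1 comes from <linux/mii.h>, already used elsewhere
	 * in e1000e
	 */
	return hw->phy.ops.read_reg(hw, MII_PHYSID1, id);
}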
static const struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.read			= e1000_read_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.reload			= e1000e_reload_nvm_generic,
	.update			= e1000_update_nvm_checksum_ich8lan,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};

static const struct e1000_nvm_operations spt_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.read			= e1000_read_nvm_spt,
	.update			= e1000_update_nvm_checksum_spt,
	.reload			= e1000e_reload_nvm_generic,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};

const struct e1000_info e1000_ich8_info = {
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
				  | FLAG_IS_ICH
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,
	.max_hw_frame_size	= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_ich9_info = {
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_ich10_info = {
	.mac			= e1000_ich10lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_pch_info = {
	.mac			= e1000_pchlan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS,
	.pba			= 26,
	.max_hw_frame_size	= 4096,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_pch2_info = {
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE
				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
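
/* Illustrative note (not upstream code): the .flags/.flags2 words above are
 * copied into the adapter at probe time and tested with plain bit masks.
 * A hypothetical predicate, assuming the adapter->flags field declared in
 * e1000.h:
 */
static inline bool e1000_has_amt_sketch(struct e1000_adapter *adapter)
{
	return !!(adapter->flags & FLAG_HAS_AMT);
}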
const struct e1000_info e1000_pch_lpt_info = {
	.mac			= e1000_pch_lpt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE
				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_pch_spt_info = {
	.mac			= e1000_pch_spt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &spt_nvm_ops,
};

const struct e1000_info e1000_pch_cnp_info = {
	.mac			= e1000_pch_cnp,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &spt_nvm_ops,
};
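
/* Illustrative note (arithmetic only, not upstream code): the ich8 part has
 * no FLAG_HAS_JUMBO_FRAMES, so its limit is one standard VLAN-tagged frame,
 * VLAN_ETH_FRAME_LEN + ETH_FCS_LEN = 1518 + 4 = 1522 bytes; 82577/8 is
 * capped at 4096; the later PCH parts advertise 9022 = 9018-byte jumbo
 * frame + 4-byte FCS.  A compile-time restatement of that arithmetic:
 */
static inline void e1000_max_frame_arith_sketch(void)
{
	BUILD_BUG_ON(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN != 1522);
	BUILD_BUG_ON(9018 + ETH_FCS_LEN != 9022);
}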