1 /* Intel PRO/1000 Linux driver 2 * Copyright(c) 1999 - 2014 Intel Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * The full GNU General Public License is included in this distribution in 14 * the file called "COPYING". 15 * 16 * Contact Information: 17 * Linux NICS <linux.nics@intel.com> 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 20 */ 21 22 /* 82562G 10/100 Network Connection 23 * 82562G-2 10/100 Network Connection 24 * 82562GT 10/100 Network Connection 25 * 82562GT-2 10/100 Network Connection 26 * 82562V 10/100 Network Connection 27 * 82562V-2 10/100 Network Connection 28 * 82566DC-2 Gigabit Network Connection 29 * 82566DC Gigabit Network Connection 30 * 82566DM-2 Gigabit Network Connection 31 * 82566DM Gigabit Network Connection 32 * 82566MC Gigabit Network Connection 33 * 82566MM Gigabit Network Connection 34 * 82567LM Gigabit Network Connection 35 * 82567LF Gigabit Network Connection 36 * 82567V Gigabit Network Connection 37 * 82567LM-2 Gigabit Network Connection 38 * 82567LF-2 Gigabit Network Connection 39 * 82567V-2 Gigabit Network Connection 40 * 82567LF-3 Gigabit Network Connection 41 * 82567LM-3 Gigabit Network Connection 42 * 82567LM-4 Gigabit Network Connection 43 * 82577LM Gigabit Network Connection 44 * 82577LC Gigabit Network Connection 45 * 82578DM Gigabit Network Connection 46 * 82578DC Gigabit Network Connection 47 * 82579LM Gigabit Network Connection 48 * 82579V Gigabit Network Connection 49 * Ethernet Connection 
I217-LM 50 * Ethernet Connection I217-V 51 * Ethernet Connection I218-V 52 * Ethernet Connection I218-LM 53 * Ethernet Connection (2) I218-LM 54 * Ethernet Connection (2) I218-V 55 * Ethernet Connection (3) I218-LM 56 * Ethernet Connection (3) I218-V 57 */ 58 59 #include "e1000.h" 60 61 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 62 /* Offset 04h HSFSTS */ 63 union ich8_hws_flash_status { 64 struct ich8_hsfsts { 65 u16 flcdone:1; /* bit 0 Flash Cycle Done */ 66 u16 flcerr:1; /* bit 1 Flash Cycle Error */ 67 u16 dael:1; /* bit 2 Direct Access error Log */ 68 u16 berasesz:2; /* bit 4:3 Sector Erase Size */ 69 u16 flcinprog:1; /* bit 5 flash cycle in Progress */ 70 u16 reserved1:2; /* bit 13:6 Reserved */ 71 u16 reserved2:6; /* bit 13:6 Reserved */ 72 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ 73 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ 74 } hsf_status; 75 u16 regval; 76 }; 77 78 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ 79 /* Offset 06h FLCTL */ 80 union ich8_hws_flash_ctrl { 81 struct ich8_hsflctl { 82 u16 flcgo:1; /* 0 Flash Cycle Go */ 83 u16 flcycle:2; /* 2:1 Flash Cycle */ 84 u16 reserved:5; /* 7:3 Reserved */ 85 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ 86 u16 flockdn:6; /* 15:10 Reserved */ 87 } hsf_ctrl; 88 u16 regval; 89 }; 90 91 /* ICH Flash Region Access Permissions */ 92 union ich8_hws_flash_regacc { 93 struct ich8_flracc { 94 u32 grra:8; /* 0:7 GbE region Read Access */ 95 u32 grwa:8; /* 8:15 GbE region Write Access */ 96 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ 97 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ 98 } hsf_flregacc; 99 u16 regval; 100 }; 101 102 /* ICH Flash Protected Region */ 103 union ich8_flash_protected_range { 104 struct ich8_pr { 105 u32 base:13; /* 0:12 Protected Range Base */ 106 u32 reserved1:2; /* 13:14 Reserved */ 107 u32 rpe:1; /* 15 Read Protection Enable */ 108 u32 limit:13; /* 16:28 Protected Range Limit */ 109 
u32 reserved2:2; /* 29:30 Reserved */ 110 u32 wpe:1; /* 31 Write Protection Enable */ 111 } range; 112 u32 regval; 113 }; 114 115 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); 116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); 117 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); 118 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 119 u32 offset, u8 byte); 120 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 121 u8 *data); 122 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, 123 u16 *data); 124 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 125 u8 size, u16 *data); 126 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); 127 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); 128 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); 129 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); 130 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); 131 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); 132 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); 133 static s32 e1000_led_on_pchlan(struct e1000_hw *hw); 134 static s32 e1000_led_off_pchlan(struct e1000_hw *hw); 135 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); 136 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); 137 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); 138 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); 139 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 140 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 141 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 142 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); 143 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); 144 static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw 
*hw); 145 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 146 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 147 static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); 148 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); 149 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state); 150 151 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 152 { 153 return readw(hw->flash_address + reg); 154 } 155 156 static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) 157 { 158 return readl(hw->flash_address + reg); 159 } 160 161 static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) 162 { 163 writew(val, hw->flash_address + reg); 164 } 165 166 static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) 167 { 168 writel(val, hw->flash_address + reg); 169 } 170 171 #define er16flash(reg) __er16flash(hw, (reg)) 172 #define er32flash(reg) __er32flash(hw, (reg)) 173 #define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) 174 #define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) 175 176 /** 177 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers 178 * @hw: pointer to the HW structure 179 * 180 * Test access to the PHY registers by reading the PHY ID registers. If 181 * the PHY ID is already known (e.g. resume path) compare it with known ID, 182 * otherwise assume the read PHY ID is correct if it is valid. 183 * 184 * Assumes the sw/fw/hw semaphore is already acquired. 
185 **/ 186 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) 187 { 188 u16 phy_reg = 0; 189 u32 phy_id = 0; 190 s32 ret_val = 0; 191 u16 retry_count; 192 u32 mac_reg = 0; 193 194 for (retry_count = 0; retry_count < 2; retry_count++) { 195 ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); 196 if (ret_val || (phy_reg == 0xFFFF)) 197 continue; 198 phy_id = (u32)(phy_reg << 16); 199 200 ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg); 201 if (ret_val || (phy_reg == 0xFFFF)) { 202 phy_id = 0; 203 continue; 204 } 205 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); 206 break; 207 } 208 209 if (hw->phy.id) { 210 if (hw->phy.id == phy_id) 211 goto out; 212 } else if (phy_id) { 213 hw->phy.id = phy_id; 214 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); 215 goto out; 216 } 217 218 /* In case the PHY needs to be in mdio slow mode, 219 * set slow mode and try to get the PHY id again. 220 */ 221 if (hw->mac.type < e1000_pch_lpt) { 222 hw->phy.ops.release(hw); 223 ret_val = e1000_set_mdio_slow_mode_hv(hw); 224 if (!ret_val) 225 ret_val = e1000e_get_phy_id(hw); 226 hw->phy.ops.acquire(hw); 227 } 228 229 if (ret_val) 230 return false; 231 out: 232 if (hw->mac.type == e1000_pch_lpt) { 233 /* Unforce SMBus mode in PHY */ 234 e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); 235 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; 236 e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); 237 238 /* Unforce SMBus mode in MAC */ 239 mac_reg = er32(CTRL_EXT); 240 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; 241 ew32(CTRL_EXT, mac_reg); 242 } 243 244 return true; 245 } 246 247 /** 248 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value 249 * @hw: pointer to the HW structure 250 * 251 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is 252 * used to reset the PHY to a quiescent state when necessary. 
253 **/ 254 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) 255 { 256 u32 mac_reg; 257 258 /* Set Phy Config Counter to 50msec */ 259 mac_reg = er32(FEXTNVM3); 260 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; 261 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; 262 ew32(FEXTNVM3, mac_reg); 263 264 /* Toggle LANPHYPC Value bit */ 265 mac_reg = er32(CTRL); 266 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; 267 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; 268 ew32(CTRL, mac_reg); 269 e1e_flush(); 270 usleep_range(10, 20); 271 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 272 ew32(CTRL, mac_reg); 273 e1e_flush(); 274 275 if (hw->mac.type < e1000_pch_lpt) { 276 msleep(50); 277 } else { 278 u16 count = 20; 279 280 do { 281 usleep_range(5000, 10000); 282 } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); 283 284 msleep(30); 285 } 286 } 287 288 /** 289 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds 290 * @hw: pointer to the HW structure 291 * 292 * Workarounds/flow necessary for PHY initialization during driver load 293 * and resume paths. 294 **/ 295 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) 296 { 297 struct e1000_adapter *adapter = hw->adapter; 298 u32 mac_reg, fwsm = er32(FWSM); 299 s32 ret_val; 300 301 /* Gate automatic PHY configuration by hardware on managed and 302 * non-managed 82579 and newer adapters. 303 */ 304 e1000_gate_hw_phy_config_ich8lan(hw, true); 305 306 /* It is not possible to be certain of the current state of ULP 307 * so forcibly disable it. 308 */ 309 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; 310 e1000_disable_ulp_lpt_lp(hw, true); 311 312 ret_val = hw->phy.ops.acquire(hw); 313 if (ret_val) { 314 e_dbg("Failed to initialize PHY flow\n"); 315 goto out; 316 } 317 318 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is 319 * inaccessible and resetting the PHY is not blocked, toggle the 320 * LANPHYPC Value bit to force the interconnect to PCIe mode. 
321 */ 322 switch (hw->mac.type) { 323 case e1000_pch_lpt: 324 if (e1000_phy_is_accessible_pchlan(hw)) 325 break; 326 327 /* Before toggling LANPHYPC, see if PHY is accessible by 328 * forcing MAC to SMBus mode first. 329 */ 330 mac_reg = er32(CTRL_EXT); 331 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; 332 ew32(CTRL_EXT, mac_reg); 333 334 /* Wait 50 milliseconds for MAC to finish any retries 335 * that it might be trying to perform from previous 336 * attempts to acknowledge any phy read requests. 337 */ 338 msleep(50); 339 340 /* fall-through */ 341 case e1000_pch2lan: 342 if (e1000_phy_is_accessible_pchlan(hw)) 343 break; 344 345 /* fall-through */ 346 case e1000_pchlan: 347 if ((hw->mac.type == e1000_pchlan) && 348 (fwsm & E1000_ICH_FWSM_FW_VALID)) 349 break; 350 351 if (hw->phy.ops.check_reset_block(hw)) { 352 e_dbg("Required LANPHYPC toggle blocked by ME\n"); 353 ret_val = -E1000_ERR_PHY; 354 break; 355 } 356 357 /* Toggle LANPHYPC Value bit */ 358 e1000_toggle_lanphypc_pch_lpt(hw); 359 if (hw->mac.type >= e1000_pch_lpt) { 360 if (e1000_phy_is_accessible_pchlan(hw)) 361 break; 362 363 /* Toggling LANPHYPC brings the PHY out of SMBus mode 364 * so ensure that the MAC is also out of SMBus mode 365 */ 366 mac_reg = er32(CTRL_EXT); 367 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; 368 ew32(CTRL_EXT, mac_reg); 369 370 if (e1000_phy_is_accessible_pchlan(hw)) 371 break; 372 373 ret_val = -E1000_ERR_PHY; 374 } 375 break; 376 default: 377 break; 378 } 379 380 hw->phy.ops.release(hw); 381 if (!ret_val) { 382 383 /* Check to see if able to reset PHY. Print error if not */ 384 if (hw->phy.ops.check_reset_block(hw)) { 385 e_err("Reset blocked by ME\n"); 386 goto out; 387 } 388 389 /* Reset the PHY before any access to it. Doing so, ensures 390 * that the PHY is in a known good state before we read/write 391 * PHY registers. The generic reset is sufficient here, 392 * because we haven't determined the PHY type yet. 
393 */ 394 ret_val = e1000e_phy_hw_reset_generic(hw); 395 if (ret_val) 396 goto out; 397 398 /* On a successful reset, possibly need to wait for the PHY 399 * to quiesce to an accessible state before returning control 400 * to the calling function. If the PHY does not quiesce, then 401 * return E1000E_BLK_PHY_RESET, as this is the condition that 402 * the PHY is in. 403 */ 404 ret_val = hw->phy.ops.check_reset_block(hw); 405 if (ret_val) 406 e_err("ME blocked access to PHY after reset\n"); 407 } 408 409 out: 410 /* Ungate automatic PHY configuration on non-managed 82579 */ 411 if ((hw->mac.type == e1000_pch2lan) && 412 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 413 usleep_range(10000, 20000); 414 e1000_gate_hw_phy_config_ich8lan(hw, false); 415 } 416 417 return ret_val; 418 } 419 420 /** 421 * e1000_init_phy_params_pchlan - Initialize PHY function pointers 422 * @hw: pointer to the HW structure 423 * 424 * Initialize family-specific PHY parameters and function pointers. 425 **/ 426 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 427 { 428 struct e1000_phy_info *phy = &hw->phy; 429 s32 ret_val; 430 431 phy->addr = 1; 432 phy->reset_delay_us = 100; 433 434 phy->ops.set_page = e1000_set_page_igp; 435 phy->ops.read_reg = e1000_read_phy_reg_hv; 436 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; 437 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; 438 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; 439 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; 440 phy->ops.write_reg = e1000_write_phy_reg_hv; 441 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; 442 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; 443 phy->ops.power_up = e1000_power_up_phy_copper; 444 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 445 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 446 447 phy->id = e1000_phy_unknown; 448 449 ret_val = e1000_init_phy_workarounds_pchlan(hw); 450 if (ret_val) 451 return ret_val; 452 453 if 
(phy->id == e1000_phy_unknown) 454 switch (hw->mac.type) { 455 default: 456 ret_val = e1000e_get_phy_id(hw); 457 if (ret_val) 458 return ret_val; 459 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) 460 break; 461 /* fall-through */ 462 case e1000_pch2lan: 463 case e1000_pch_lpt: 464 /* In case the PHY needs to be in mdio slow mode, 465 * set slow mode and try to get the PHY id again. 466 */ 467 ret_val = e1000_set_mdio_slow_mode_hv(hw); 468 if (ret_val) 469 return ret_val; 470 ret_val = e1000e_get_phy_id(hw); 471 if (ret_val) 472 return ret_val; 473 break; 474 } 475 phy->type = e1000e_get_phy_type_from_id(phy->id); 476 477 switch (phy->type) { 478 case e1000_phy_82577: 479 case e1000_phy_82579: 480 case e1000_phy_i217: 481 phy->ops.check_polarity = e1000_check_polarity_82577; 482 phy->ops.force_speed_duplex = 483 e1000_phy_force_speed_duplex_82577; 484 phy->ops.get_cable_length = e1000_get_cable_length_82577; 485 phy->ops.get_info = e1000_get_phy_info_82577; 486 phy->ops.commit = e1000e_phy_sw_reset; 487 break; 488 case e1000_phy_82578: 489 phy->ops.check_polarity = e1000_check_polarity_m88; 490 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; 491 phy->ops.get_cable_length = e1000e_get_cable_length_m88; 492 phy->ops.get_info = e1000e_get_phy_info_m88; 493 break; 494 default: 495 ret_val = -E1000_ERR_PHY; 496 break; 497 } 498 499 return ret_val; 500 } 501 502 /** 503 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers 504 * @hw: pointer to the HW structure 505 * 506 * Initialize family-specific PHY parameters and function pointers. 
507 **/ 508 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) 509 { 510 struct e1000_phy_info *phy = &hw->phy; 511 s32 ret_val; 512 u16 i = 0; 513 514 phy->addr = 1; 515 phy->reset_delay_us = 100; 516 517 phy->ops.power_up = e1000_power_up_phy_copper; 518 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 519 520 /* We may need to do this twice - once for IGP and if that fails, 521 * we'll set BM func pointers and try again 522 */ 523 ret_val = e1000e_determine_phy_address(hw); 524 if (ret_val) { 525 phy->ops.write_reg = e1000e_write_phy_reg_bm; 526 phy->ops.read_reg = e1000e_read_phy_reg_bm; 527 ret_val = e1000e_determine_phy_address(hw); 528 if (ret_val) { 529 e_dbg("Cannot determine PHY addr. Erroring out\n"); 530 return ret_val; 531 } 532 } 533 534 phy->id = 0; 535 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && 536 (i++ < 100)) { 537 usleep_range(1000, 2000); 538 ret_val = e1000e_get_phy_id(hw); 539 if (ret_val) 540 return ret_val; 541 } 542 543 /* Verify phy id */ 544 switch (phy->id) { 545 case IGP03E1000_E_PHY_ID: 546 phy->type = e1000_phy_igp_3; 547 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 548 phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; 549 phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; 550 phy->ops.get_info = e1000e_get_phy_info_igp; 551 phy->ops.check_polarity = e1000_check_polarity_igp; 552 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; 553 break; 554 case IFE_E_PHY_ID: 555 case IFE_PLUS_E_PHY_ID: 556 case IFE_C_E_PHY_ID: 557 phy->type = e1000_phy_ife; 558 phy->autoneg_mask = E1000_ALL_NOT_GIG; 559 phy->ops.get_info = e1000_get_phy_info_ife; 560 phy->ops.check_polarity = e1000_check_polarity_ife; 561 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; 562 break; 563 case BME1000_E_PHY_ID: 564 phy->type = e1000_phy_bm; 565 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 566 phy->ops.read_reg = e1000e_read_phy_reg_bm; 567 
phy->ops.write_reg = e1000e_write_phy_reg_bm; 568 phy->ops.commit = e1000e_phy_sw_reset; 569 phy->ops.get_info = e1000e_get_phy_info_m88; 570 phy->ops.check_polarity = e1000_check_polarity_m88; 571 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; 572 break; 573 default: 574 return -E1000_ERR_PHY; 575 break; 576 } 577 578 return 0; 579 } 580 581 /** 582 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers 583 * @hw: pointer to the HW structure 584 * 585 * Initialize family-specific NVM parameters and function 586 * pointers. 587 **/ 588 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) 589 { 590 struct e1000_nvm_info *nvm = &hw->nvm; 591 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 592 u32 gfpreg, sector_base_addr, sector_end_addr; 593 u16 i; 594 595 /* Can't read flash registers if the register set isn't mapped. */ 596 if (!hw->flash_address) { 597 e_dbg("ERROR: Flash registers not mapped\n"); 598 return -E1000_ERR_CONFIG; 599 } 600 601 nvm->type = e1000_nvm_flash_sw; 602 603 gfpreg = er32flash(ICH_FLASH_GFPREG); 604 605 /* sector_X_addr is a "sector"-aligned address (4096 bytes) 606 * Add 1 to sector_end_addr since this sector is included in 607 * the overall size. 608 */ 609 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; 610 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; 611 612 /* flash_base_addr is byte-aligned */ 613 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; 614 615 /* find total size of the NVM, then cut in half since the total 616 * size represents two separate NVM banks. 
617 */ 618 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) 619 << FLASH_SECTOR_ADDR_SHIFT); 620 nvm->flash_bank_size /= 2; 621 /* Adjust to word count */ 622 nvm->flash_bank_size /= sizeof(u16); 623 624 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; 625 626 /* Clear shadow ram */ 627 for (i = 0; i < nvm->word_size; i++) { 628 dev_spec->shadow_ram[i].modified = false; 629 dev_spec->shadow_ram[i].value = 0xFFFF; 630 } 631 632 return 0; 633 } 634 635 /** 636 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers 637 * @hw: pointer to the HW structure 638 * 639 * Initialize family-specific MAC parameters and function 640 * pointers. 641 **/ 642 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) 643 { 644 struct e1000_mac_info *mac = &hw->mac; 645 646 /* Set media type function pointer */ 647 hw->phy.media_type = e1000_media_type_copper; 648 649 /* Set mta register count */ 650 mac->mta_reg_count = 32; 651 /* Set rar entry count */ 652 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; 653 if (mac->type == e1000_ich8lan) 654 mac->rar_entry_count--; 655 /* FWSM register */ 656 mac->has_fwsm = true; 657 /* ARC subsystem not supported */ 658 mac->arc_subsystem_valid = false; 659 /* Adaptive IFS supported */ 660 mac->adaptive_ifs = true; 661 662 /* LED and other operations */ 663 switch (mac->type) { 664 case e1000_ich8lan: 665 case e1000_ich9lan: 666 case e1000_ich10lan: 667 /* check management mode */ 668 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; 669 /* ID LED init */ 670 mac->ops.id_led_init = e1000e_id_led_init_generic; 671 /* blink LED */ 672 mac->ops.blink_led = e1000e_blink_led_generic; 673 /* setup LED */ 674 mac->ops.setup_led = e1000e_setup_led_generic; 675 /* cleanup LED */ 676 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; 677 /* turn on/off LED */ 678 mac->ops.led_on = e1000_led_on_ich8lan; 679 mac->ops.led_off = e1000_led_off_ich8lan; 680 break; 681 case e1000_pch2lan: 682 mac->rar_entry_count = 
E1000_PCH2_RAR_ENTRIES; 683 mac->ops.rar_set = e1000_rar_set_pch2lan; 684 /* fall-through */ 685 case e1000_pch_lpt: 686 case e1000_pchlan: 687 /* check management mode */ 688 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; 689 /* ID LED init */ 690 mac->ops.id_led_init = e1000_id_led_init_pchlan; 691 /* setup LED */ 692 mac->ops.setup_led = e1000_setup_led_pchlan; 693 /* cleanup LED */ 694 mac->ops.cleanup_led = e1000_cleanup_led_pchlan; 695 /* turn on/off LED */ 696 mac->ops.led_on = e1000_led_on_pchlan; 697 mac->ops.led_off = e1000_led_off_pchlan; 698 break; 699 default: 700 break; 701 } 702 703 if (mac->type == e1000_pch_lpt) { 704 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; 705 mac->ops.rar_set = e1000_rar_set_pch_lpt; 706 mac->ops.setup_physical_interface = 707 e1000_setup_copper_link_pch_lpt; 708 mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt; 709 } 710 711 /* Enable PCS Lock-loss workaround for ICH8 */ 712 if (mac->type == e1000_ich8lan) 713 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 714 715 return 0; 716 } 717 718 /** 719 * __e1000_access_emi_reg_locked - Read/write EMI register 720 * @hw: pointer to the HW structure 721 * @addr: EMI address to program 722 * @data: pointer to value to read/write from/to the EMI address 723 * @read: boolean flag to indicate read or write 724 * 725 * This helper function assumes the SW/FW/HW Semaphore is already acquired. 
726 **/ 727 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, 728 u16 *data, bool read) 729 { 730 s32 ret_val; 731 732 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); 733 if (ret_val) 734 return ret_val; 735 736 if (read) 737 ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data); 738 else 739 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data); 740 741 return ret_val; 742 } 743 744 /** 745 * e1000_read_emi_reg_locked - Read Extended Management Interface register 746 * @hw: pointer to the HW structure 747 * @addr: EMI address to program 748 * @data: value to be read from the EMI address 749 * 750 * Assumes the SW/FW/HW Semaphore is already acquired. 751 **/ 752 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) 753 { 754 return __e1000_access_emi_reg_locked(hw, addr, data, true); 755 } 756 757 /** 758 * e1000_write_emi_reg_locked - Write Extended Management Interface register 759 * @hw: pointer to the HW structure 760 * @addr: EMI address to program 761 * @data: value to be written to the EMI address 762 * 763 * Assumes the SW/FW/HW Semaphore is already acquired. 764 **/ 765 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) 766 { 767 return __e1000_access_emi_reg_locked(hw, addr, &data, false); 768 } 769 770 /** 771 * e1000_set_eee_pchlan - Enable/disable EEE support 772 * @hw: pointer to the HW structure 773 * 774 * Enable/disable EEE based on setting in dev_spec structure, the duplex of 775 * the link and the EEE capabilities of the link partner. The LPI Control 776 * register bits will remain set only if/when link is up. 777 * 778 * EEE LPI must not be asserted earlier than one second after link is up. 779 * On 82579, EEE LPI should not be enabled until such time otherwise there 780 * can be link issues with some switches. Other devices can have EEE LPI 781 * enabled immediately upon link up since they have a timer in hardware which 782 * prevents LPI from being asserted too early. 
783 **/ 784 s32 e1000_set_eee_pchlan(struct e1000_hw *hw) 785 { 786 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 787 s32 ret_val; 788 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; 789 790 switch (hw->phy.type) { 791 case e1000_phy_82579: 792 lpa = I82579_EEE_LP_ABILITY; 793 pcs_status = I82579_EEE_PCS_STATUS; 794 adv_addr = I82579_EEE_ADVERTISEMENT; 795 break; 796 case e1000_phy_i217: 797 lpa = I217_EEE_LP_ABILITY; 798 pcs_status = I217_EEE_PCS_STATUS; 799 adv_addr = I217_EEE_ADVERTISEMENT; 800 break; 801 default: 802 return 0; 803 } 804 805 ret_val = hw->phy.ops.acquire(hw); 806 if (ret_val) 807 return ret_val; 808 809 ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); 810 if (ret_val) 811 goto release; 812 813 /* Clear bits that enable EEE in various speeds */ 814 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; 815 816 /* Enable EEE if not disabled by user */ 817 if (!dev_spec->eee_disable) { 818 /* Save off link partner's EEE ability */ 819 ret_val = e1000_read_emi_reg_locked(hw, lpa, 820 &dev_spec->eee_lp_ability); 821 if (ret_val) 822 goto release; 823 824 /* Read EEE advertisement */ 825 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); 826 if (ret_val) 827 goto release; 828 829 /* Enable EEE only for speeds in which the link partner is 830 * EEE capable and for which we advertise EEE. 831 */ 832 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) 833 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; 834 835 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { 836 e1e_rphy_locked(hw, MII_LPA, &data); 837 if (data & LPA_100FULL) 838 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; 839 else 840 /* EEE is not supported in 100Half, so ignore 841 * partner's EEE in 100 ability if full-duplex 842 * is not advertised. 
843 */ 844 dev_spec->eee_lp_ability &= 845 ~I82579_EEE_100_SUPPORTED; 846 } 847 } 848 849 if (hw->phy.type == e1000_phy_82579) { 850 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, 851 &data); 852 if (ret_val) 853 goto release; 854 855 data &= ~I82579_LPI_100_PLL_SHUT; 856 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, 857 data); 858 } 859 860 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ 861 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); 862 if (ret_val) 863 goto release; 864 865 ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); 866 release: 867 hw->phy.ops.release(hw); 868 869 return ret_val; 870 } 871 872 /** 873 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP 874 * @hw: pointer to the HW structure 875 * @link: link up bool flag 876 * 877 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications 878 * preventing further DMA write requests. Workaround the issue by disabling 879 * the de-assertion of the clock request when in 1Gpbs mode. 880 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link 881 * speeds in order to avoid Tx hangs. 
882 **/ 883 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) 884 { 885 u32 fextnvm6 = er32(FEXTNVM6); 886 u32 status = er32(STATUS); 887 s32 ret_val = 0; 888 u16 reg; 889 890 if (link && (status & E1000_STATUS_SPEED_1000)) { 891 ret_val = hw->phy.ops.acquire(hw); 892 if (ret_val) 893 return ret_val; 894 895 ret_val = 896 e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, 897 ®); 898 if (ret_val) 899 goto release; 900 901 ret_val = 902 e1000e_write_kmrn_reg_locked(hw, 903 E1000_KMRNCTRLSTA_K1_CONFIG, 904 reg & 905 ~E1000_KMRNCTRLSTA_K1_ENABLE); 906 if (ret_val) 907 goto release; 908 909 usleep_range(10, 20); 910 911 ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); 912 913 ret_val = 914 e1000e_write_kmrn_reg_locked(hw, 915 E1000_KMRNCTRLSTA_K1_CONFIG, 916 reg); 917 release: 918 hw->phy.ops.release(hw); 919 } else { 920 /* clear FEXTNVM6 bit 8 on link down or 10/100 */ 921 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; 922 923 if (!link || ((status & E1000_STATUS_SPEED_100) && 924 (status & E1000_STATUS_FD))) 925 goto update_fextnvm6; 926 927 ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); 928 if (ret_val) 929 return ret_val; 930 931 /* Clear link status transmit timeout */ 932 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; 933 934 if (status & E1000_STATUS_SPEED_100) { 935 /* Set inband Tx timeout to 5x10us for 100Half */ 936 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; 937 938 /* Do not extend the K1 entry latency for 100Half */ 939 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; 940 } else { 941 /* Set inband Tx timeout to 50x10us for 10Full/Half */ 942 reg |= 50 << 943 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; 944 945 /* Extend the K1 entry latency for 10 Mbps */ 946 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; 947 } 948 949 ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg); 950 if (ret_val) 951 return ret_val; 952 953 update_fextnvm6: 954 ew32(FEXTNVM6, fextnvm6); 955 } 956 957 return ret_val; 958 } 959 
960 /** 961 * e1000_platform_pm_pch_lpt - Set platform power management values 962 * @hw: pointer to the HW structure 963 * @link: bool indicating link status 964 * 965 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" 966 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed 967 * when link is up (which must not exceed the maximum latency supported 968 * by the platform), otherwise specify there is no LTR requirement. 969 * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop 970 * latencies in the LTR Extended Capability Structure in the PCIe Extended 971 * Capability register set, on this device LTR is set by writing the 972 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and 973 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) 974 * message to the PMC. 975 **/ 976 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) 977 { 978 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | 979 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; 980 u16 lat_enc = 0; /* latency encoded */ 981 982 if (link) { 983 u16 speed, duplex, scale = 0; 984 u16 max_snoop, max_nosnoop; 985 u16 max_ltr_enc; /* max LTR latency encoded */ 986 s64 lat_ns; /* latency (ns) */ 987 s64 value; 988 u32 rxa; 989 990 if (!hw->adapter->max_frame_size) { 991 e_dbg("max_frame_size not set.\n"); 992 return -E1000_ERR_CONFIG; 993 } 994 995 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 996 if (!speed) { 997 e_dbg("Speed not set.\n"); 998 return -E1000_ERR_CONFIG; 999 } 1000 1001 /* Rx Packet Buffer Allocation size (KB) */ 1002 rxa = er32(PBA) & E1000_PBA_RXA_MASK; 1003 1004 /* Determine the maximum latency tolerated by the device. 1005 * 1006 * Per the PCIe spec, the tolerated latencies are encoded as 1007 * a 3-bit encoded scale (only 0-5 are valid) multiplied by 1008 * a 10-bit value (0-1023) to provide a range from 1 ns to 1009 * 2^25*(2^10-1) ns. 
The scale is encoded as 0=2^0ns, 1010 * 1=2^5ns, 2=2^10ns,...5=2^25ns. 1011 */ 1012 lat_ns = ((s64)rxa * 1024 - 1013 (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; 1014 if (lat_ns < 0) 1015 lat_ns = 0; 1016 else 1017 do_div(lat_ns, speed); 1018 1019 value = lat_ns; 1020 while (value > PCI_LTR_VALUE_MASK) { 1021 scale++; 1022 value = DIV_ROUND_UP(value, (1 << 5)); 1023 } 1024 if (scale > E1000_LTRV_SCALE_MAX) { 1025 e_dbg("Invalid LTR latency scale %d\n", scale); 1026 return -E1000_ERR_CONFIG; 1027 } 1028 lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value); 1029 1030 /* Determine the maximum latency tolerated by the platform */ 1031 pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT, 1032 &max_snoop); 1033 pci_read_config_word(hw->adapter->pdev, 1034 E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); 1035 max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); 1036 1037 if (lat_enc > max_ltr_enc) 1038 lat_enc = max_ltr_enc; 1039 } 1040 1041 /* Set Snoop and No-Snoop latencies the same */ 1042 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); 1043 ew32(LTRV, reg); 1044 1045 return 0; 1046 } 1047 1048 /** 1049 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP 1050 * @hw: pointer to the HW structure 1051 * @to_sx: boolean indicating a system power state transition to Sx 1052 * 1053 * When link is down, configure ULP mode to significantly reduce the power 1054 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the 1055 * ME firmware to start the ULP configuration. If not on an ME enabled 1056 * system, configure the ULP mode by software. 
1057 */ 1058 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) 1059 { 1060 u32 mac_reg; 1061 s32 ret_val = 0; 1062 u16 phy_reg; 1063 1064 if ((hw->mac.type < e1000_pch_lpt) || 1065 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || 1066 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || 1067 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || 1068 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || 1069 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) 1070 return 0; 1071 1072 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { 1073 /* Request ME configure ULP mode in the PHY */ 1074 mac_reg = er32(H2ME); 1075 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; 1076 ew32(H2ME, mac_reg); 1077 1078 goto out; 1079 } 1080 1081 if (!to_sx) { 1082 int i = 0; 1083 1084 /* Poll up to 5 seconds for Cable Disconnected indication */ 1085 while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) { 1086 /* Bail if link is re-acquired */ 1087 if (er32(STATUS) & E1000_STATUS_LU) 1088 return -E1000_ERR_PHY; 1089 1090 if (i++ == 100) 1091 break; 1092 1093 msleep(50); 1094 } 1095 e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n", 1096 (er32(FEXT) & 1097 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? 
"" : "not", i * 50); 1098 } 1099 1100 ret_val = hw->phy.ops.acquire(hw); 1101 if (ret_val) 1102 goto out; 1103 1104 /* Force SMBus mode in PHY */ 1105 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); 1106 if (ret_val) 1107 goto release; 1108 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; 1109 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); 1110 1111 /* Force SMBus mode in MAC */ 1112 mac_reg = er32(CTRL_EXT); 1113 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; 1114 ew32(CTRL_EXT, mac_reg); 1115 1116 /* Set Inband ULP Exit, Reset to SMBus mode and 1117 * Disable SMBus Release on PERST# in PHY 1118 */ 1119 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); 1120 if (ret_val) 1121 goto release; 1122 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | 1123 I218_ULP_CONFIG1_DISABLE_SMB_PERST); 1124 if (to_sx) { 1125 if (er32(WUFC) & E1000_WUFC_LNKC) 1126 phy_reg |= I218_ULP_CONFIG1_WOL_HOST; 1127 1128 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; 1129 } else { 1130 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; 1131 } 1132 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); 1133 1134 /* Set Disable SMBus Release on PERST# in MAC */ 1135 mac_reg = er32(FEXTNVM7); 1136 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; 1137 ew32(FEXTNVM7, mac_reg); 1138 1139 /* Commit ULP changes in PHY by starting auto ULP configuration */ 1140 phy_reg |= I218_ULP_CONFIG1_START; 1141 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); 1142 release: 1143 hw->phy.ops.release(hw); 1144 out: 1145 if (ret_val) 1146 e_dbg("Error in ULP enable flow: %d\n", ret_val); 1147 else 1148 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; 1149 1150 return ret_val; 1151 } 1152 1153 /** 1154 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP 1155 * @hw: pointer to the HW structure 1156 * @force: boolean indicating whether or not to force disabling ULP 1157 * 1158 * Un-configure ULP mode when link is up, the system is transitioned from 1159 * Sx or the 
driver is unloaded. If on a Manageability Engine (ME) enabled 1160 * system, poll for an indication from ME that ULP has been un-configured. 1161 * If not on an ME enabled system, un-configure the ULP mode by software. 1162 * 1163 * During nominal operation, this function is called when link is acquired 1164 * to disable ULP mode (force=false); otherwise, for example when unloading 1165 * the driver or during Sx->S0 transitions, this is called with force=true 1166 * to forcibly disable ULP. 1167 */ 1168 static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) 1169 { 1170 s32 ret_val = 0; 1171 u32 mac_reg; 1172 u16 phy_reg; 1173 int i = 0; 1174 1175 if ((hw->mac.type < e1000_pch_lpt) || 1176 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || 1177 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || 1178 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || 1179 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || 1180 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) 1181 return 0; 1182 1183 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { 1184 if (force) { 1185 /* Request ME un-configure ULP mode in the PHY */ 1186 mac_reg = er32(H2ME); 1187 mac_reg &= ~E1000_H2ME_ULP; 1188 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; 1189 ew32(H2ME, mac_reg); 1190 } 1191 1192 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ 1193 while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { 1194 if (i++ == 10) { 1195 ret_val = -E1000_ERR_PHY; 1196 goto out; 1197 } 1198 1199 usleep_range(10000, 20000); 1200 } 1201 e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); 1202 1203 if (force) { 1204 mac_reg = er32(H2ME); 1205 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; 1206 ew32(H2ME, mac_reg); 1207 } else { 1208 /* Clear H2ME.ULP after ME ULP configuration */ 1209 mac_reg = er32(H2ME); 1210 mac_reg &= ~E1000_H2ME_ULP; 1211 ew32(H2ME, mac_reg); 1212 } 1213 1214 goto out; 1215 } 1216 1217 ret_val = hw->phy.ops.acquire(hw); 1218 if (ret_val) 1219 goto out; 
1220 1221 if (force) 1222 /* Toggle LANPHYPC Value bit */ 1223 e1000_toggle_lanphypc_pch_lpt(hw); 1224 1225 /* Unforce SMBus mode in PHY */ 1226 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); 1227 if (ret_val) { 1228 /* The MAC might be in PCIe mode, so temporarily force to 1229 * SMBus mode in order to access the PHY. 1230 */ 1231 mac_reg = er32(CTRL_EXT); 1232 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; 1233 ew32(CTRL_EXT, mac_reg); 1234 1235 msleep(50); 1236 1237 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, 1238 &phy_reg); 1239 if (ret_val) 1240 goto release; 1241 } 1242 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; 1243 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); 1244 1245 /* Unforce SMBus mode in MAC */ 1246 mac_reg = er32(CTRL_EXT); 1247 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; 1248 ew32(CTRL_EXT, mac_reg); 1249 1250 /* When ULP mode was previously entered, K1 was disabled by the 1251 * hardware. Re-Enable K1 in the PHY when exiting ULP. 1252 */ 1253 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); 1254 if (ret_val) 1255 goto release; 1256 phy_reg |= HV_PM_CTRL_K1_ENABLE; 1257 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); 1258 1259 /* Clear ULP enabled configuration */ 1260 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); 1261 if (ret_val) 1262 goto release; 1263 phy_reg &= ~(I218_ULP_CONFIG1_IND | 1264 I218_ULP_CONFIG1_STICKY_ULP | 1265 I218_ULP_CONFIG1_RESET_TO_SMBUS | 1266 I218_ULP_CONFIG1_WOL_HOST | 1267 I218_ULP_CONFIG1_INBAND_EXIT | 1268 I218_ULP_CONFIG1_DISABLE_SMB_PERST); 1269 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); 1270 1271 /* Commit ULP changes by starting auto ULP configuration */ 1272 phy_reg |= I218_ULP_CONFIG1_START; 1273 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); 1274 1275 /* Clear Disable SMBus Release on PERST# in MAC */ 1276 mac_reg = er32(FEXTNVM7); 1277 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; 1278 ew32(FEXTNVM7, 
mac_reg); 1279 1280 release: 1281 hw->phy.ops.release(hw); 1282 if (force) { 1283 e1000_phy_hw_reset(hw); 1284 msleep(50); 1285 } 1286 out: 1287 if (ret_val) 1288 e_dbg("Error in ULP disable flow: %d\n", ret_val); 1289 else 1290 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; 1291 1292 return ret_val; 1293 } 1294 1295 /** 1296 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 1297 * @hw: pointer to the HW structure 1298 * 1299 * Checks to see of the link status of the hardware has changed. If a 1300 * change in link status has been detected, then we read the PHY registers 1301 * to get the current speed/duplex if link exists. 1302 **/ 1303 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) 1304 { 1305 struct e1000_mac_info *mac = &hw->mac; 1306 s32 ret_val; 1307 bool link; 1308 u16 phy_reg; 1309 1310 /* We only want to go out to the PHY registers to see if Auto-Neg 1311 * has completed and/or if our link status has changed. The 1312 * get_link_status flag is set upon receiving a Link Status 1313 * Change or Rx Sequence Error interrupt. 1314 */ 1315 if (!mac->get_link_status) 1316 return 0; 1317 1318 /* First we want to see if the MII Status Register reports 1319 * link. If so, then we want to get the current speed/duplex 1320 * of the PHY. 1321 */ 1322 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 1323 if (ret_val) 1324 return ret_val; 1325 1326 if (hw->mac.type == e1000_pchlan) { 1327 ret_val = e1000_k1_gig_workaround_hv(hw, link); 1328 if (ret_val) 1329 return ret_val; 1330 } 1331 1332 /* When connected at 10Mbps half-duplex, some parts are excessively 1333 * aggressive resulting in many collisions. To avoid this, increase 1334 * the IPG and reduce Rx latency in the PHY. 
1335 */ 1336 if (((hw->mac.type == e1000_pch2lan) || 1337 (hw->mac.type == e1000_pch_lpt)) && link) { 1338 u32 reg; 1339 1340 reg = er32(STATUS); 1341 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { 1342 u16 emi_addr; 1343 1344 reg = er32(TIPG); 1345 reg &= ~E1000_TIPG_IPGT_MASK; 1346 reg |= 0xFF; 1347 ew32(TIPG, reg); 1348 1349 /* Reduce Rx latency in analog PHY */ 1350 ret_val = hw->phy.ops.acquire(hw); 1351 if (ret_val) 1352 return ret_val; 1353 1354 if (hw->mac.type == e1000_pch2lan) 1355 emi_addr = I82579_RX_CONFIG; 1356 else 1357 emi_addr = I217_RX_CONFIG; 1358 1359 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); 1360 1361 hw->phy.ops.release(hw); 1362 1363 if (ret_val) 1364 return ret_val; 1365 } 1366 } 1367 1368 /* Work-around I218 hang issue */ 1369 if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || 1370 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || 1371 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || 1372 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { 1373 ret_val = e1000_k1_workaround_lpt_lp(hw, link); 1374 if (ret_val) 1375 return ret_val; 1376 } 1377 1378 if (hw->mac.type == e1000_pch_lpt) { 1379 /* Set platform power management values for 1380 * Latency Tolerance Reporting (LTR) 1381 */ 1382 ret_val = e1000_platform_pm_pch_lpt(hw, link); 1383 if (ret_val) 1384 return ret_val; 1385 } 1386 1387 /* Clear link partner's EEE ability */ 1388 hw->dev_spec.ich8lan.eee_lp_ability = 0; 1389 1390 if (!link) 1391 return 0; /* No link detected */ 1392 1393 mac->get_link_status = false; 1394 1395 switch (hw->mac.type) { 1396 case e1000_pch2lan: 1397 ret_val = e1000_k1_workaround_lv(hw); 1398 if (ret_val) 1399 return ret_val; 1400 /* fall-thru */ 1401 case e1000_pchlan: 1402 if (hw->phy.type == e1000_phy_82578) { 1403 ret_val = e1000_link_stall_workaround_hv(hw); 1404 if (ret_val) 1405 return ret_val; 1406 } 1407 1408 /* Workaround for PCHx parts in half-duplex: 1409 * Set the number of 
preambles removed from the packet 1410 * when it is passed from the PHY to the MAC to prevent 1411 * the MAC from misinterpreting the packet type. 1412 */ 1413 e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); 1414 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; 1415 1416 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) 1417 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); 1418 1419 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); 1420 break; 1421 default: 1422 break; 1423 } 1424 1425 /* Check if there was DownShift, must be checked 1426 * immediately after link-up 1427 */ 1428 e1000e_check_downshift(hw); 1429 1430 /* Enable/Disable EEE after link up */ 1431 if (hw->phy.type > e1000_phy_82579) { 1432 ret_val = e1000_set_eee_pchlan(hw); 1433 if (ret_val) 1434 return ret_val; 1435 } 1436 1437 /* If we are forcing speed/duplex, then we simply return since 1438 * we have already determined whether we have link or not. 1439 */ 1440 if (!mac->autoneg) 1441 return -E1000_ERR_CONFIG; 1442 1443 /* Auto-Neg is enabled. Auto Speed Detection takes care 1444 * of MAC speed/duplex configuration. So we only need to 1445 * configure Collision Distance in the MAC. 1446 */ 1447 mac->ops.config_collision_dist(hw); 1448 1449 /* Configure Flow Control now that Auto-Neg has completed. 1450 * First, we need to restore the desired flow control 1451 * settings because we may have had to re-autoneg with a 1452 * different link partner. 
1453 */ 1454 ret_val = e1000e_config_fc_after_link_up(hw); 1455 if (ret_val) 1456 e_dbg("Error configuring flow control\n"); 1457 1458 return ret_val; 1459 } 1460 1461 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 1462 { 1463 struct e1000_hw *hw = &adapter->hw; 1464 s32 rc; 1465 1466 rc = e1000_init_mac_params_ich8lan(hw); 1467 if (rc) 1468 return rc; 1469 1470 rc = e1000_init_nvm_params_ich8lan(hw); 1471 if (rc) 1472 return rc; 1473 1474 switch (hw->mac.type) { 1475 case e1000_ich8lan: 1476 case e1000_ich9lan: 1477 case e1000_ich10lan: 1478 rc = e1000_init_phy_params_ich8lan(hw); 1479 break; 1480 case e1000_pchlan: 1481 case e1000_pch2lan: 1482 case e1000_pch_lpt: 1483 rc = e1000_init_phy_params_pchlan(hw); 1484 break; 1485 default: 1486 break; 1487 } 1488 if (rc) 1489 return rc; 1490 1491 /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or 1492 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). 1493 */ 1494 if ((adapter->hw.phy.type == e1000_phy_ife) || 1495 ((adapter->hw.mac.type >= e1000_pch2lan) && 1496 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { 1497 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; 1498 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; 1499 1500 hw->mac.ops.blink_led = NULL; 1501 } 1502 1503 if ((adapter->hw.mac.type == e1000_ich8lan) && 1504 (adapter->hw.phy.type != e1000_phy_ife)) 1505 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; 1506 1507 /* Enable workaround for 82579 w/ ME enabled */ 1508 if ((adapter->hw.mac.type == e1000_pch2lan) && 1509 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 1510 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; 1511 1512 return 0; 1513 } 1514 1515 static DEFINE_MUTEX(nvm_mutex); 1516 1517 /** 1518 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex 1519 * @hw: pointer to the HW structure 1520 * 1521 * Acquires the mutex for performing NVM operations. 
1522 **/ 1523 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw) 1524 { 1525 mutex_lock(&nvm_mutex); 1526 1527 return 0; 1528 } 1529 1530 /** 1531 * e1000_release_nvm_ich8lan - Release NVM mutex 1532 * @hw: pointer to the HW structure 1533 * 1534 * Releases the mutex used while performing NVM operations. 1535 **/ 1536 static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw) 1537 { 1538 mutex_unlock(&nvm_mutex); 1539 } 1540 1541 /** 1542 * e1000_acquire_swflag_ich8lan - Acquire software control flag 1543 * @hw: pointer to the HW structure 1544 * 1545 * Acquires the software control flag for performing PHY and select 1546 * MAC CSR accesses. 1547 **/ 1548 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) 1549 { 1550 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; 1551 s32 ret_val = 0; 1552 1553 if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE, 1554 &hw->adapter->state)) { 1555 e_dbg("contention for Phy access\n"); 1556 return -E1000_ERR_PHY; 1557 } 1558 1559 while (timeout) { 1560 extcnf_ctrl = er32(EXTCNF_CTRL); 1561 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) 1562 break; 1563 1564 mdelay(1); 1565 timeout--; 1566 } 1567 1568 if (!timeout) { 1569 e_dbg("SW has already locked the resource.\n"); 1570 ret_val = -E1000_ERR_CONFIG; 1571 goto out; 1572 } 1573 1574 timeout = SW_FLAG_TIMEOUT; 1575 1576 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 1577 ew32(EXTCNF_CTRL, extcnf_ctrl); 1578 1579 while (timeout) { 1580 extcnf_ctrl = er32(EXTCNF_CTRL); 1581 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 1582 break; 1583 1584 mdelay(1); 1585 timeout--; 1586 } 1587 1588 if (!timeout) { 1589 e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", 1590 er32(FWSM), extcnf_ctrl); 1591 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 1592 ew32(EXTCNF_CTRL, extcnf_ctrl); 1593 ret_val = -E1000_ERR_CONFIG; 1594 goto out; 1595 } 1596 1597 out: 1598 if (ret_val) 1599 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, 
&hw->adapter->state); 1600 1601 return ret_val; 1602 } 1603 1604 /** 1605 * e1000_release_swflag_ich8lan - Release software control flag 1606 * @hw: pointer to the HW structure 1607 * 1608 * Releases the software control flag for performing PHY and select 1609 * MAC CSR accesses. 1610 **/ 1611 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) 1612 { 1613 u32 extcnf_ctrl; 1614 1615 extcnf_ctrl = er32(EXTCNF_CTRL); 1616 1617 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { 1618 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 1619 ew32(EXTCNF_CTRL, extcnf_ctrl); 1620 } else { 1621 e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); 1622 } 1623 1624 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); 1625 } 1626 1627 /** 1628 * e1000_check_mng_mode_ich8lan - Checks management mode 1629 * @hw: pointer to the HW structure 1630 * 1631 * This checks if the adapter has any manageability enabled. 1632 * This is a function pointer entry point only called by read/write 1633 * routines for the PHY and NVM parts. 1634 **/ 1635 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) 1636 { 1637 u32 fwsm; 1638 1639 fwsm = er32(FWSM); 1640 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 1641 ((fwsm & E1000_FWSM_MODE_MASK) == 1642 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 1643 } 1644 1645 /** 1646 * e1000_check_mng_mode_pchlan - Checks management mode 1647 * @hw: pointer to the HW structure 1648 * 1649 * This checks if the adapter has iAMT enabled. 1650 * This is a function pointer entry point only called by read/write 1651 * routines for the PHY and NVM parts. 
1652 **/ 1653 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) 1654 { 1655 u32 fwsm; 1656 1657 fwsm = er32(FWSM); 1658 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 1659 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 1660 } 1661 1662 /** 1663 * e1000_rar_set_pch2lan - Set receive address register 1664 * @hw: pointer to the HW structure 1665 * @addr: pointer to the receive address 1666 * @index: receive address array register 1667 * 1668 * Sets the receive address array register at index to the address passed 1669 * in by addr. For 82579, RAR[0] is the base address register that is to 1670 * contain the MAC address but RAR[1-6] are reserved for manageability (ME). 1671 * Use SHRA[0-3] in place of those reserved for ME. 1672 **/ 1673 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) 1674 { 1675 u32 rar_low, rar_high; 1676 1677 /* HW expects these in little endian so we reverse the byte order 1678 * from network order (big endian) to little endian 1679 */ 1680 rar_low = ((u32)addr[0] | 1681 ((u32)addr[1] << 8) | 1682 ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); 1683 1684 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); 1685 1686 /* If MAC address zero, no need to set the AV bit */ 1687 if (rar_low || rar_high) 1688 rar_high |= E1000_RAH_AV; 1689 1690 if (index == 0) { 1691 ew32(RAL(index), rar_low); 1692 e1e_flush(); 1693 ew32(RAH(index), rar_high); 1694 e1e_flush(); 1695 return 0; 1696 } 1697 1698 /* RAR[1-6] are owned by manageability. Skip those and program the 1699 * next address into the SHRA register array. 
1700 */ 1701 if (index < (u32)(hw->mac.rar_entry_count)) { 1702 s32 ret_val; 1703 1704 ret_val = e1000_acquire_swflag_ich8lan(hw); 1705 if (ret_val) 1706 goto out; 1707 1708 ew32(SHRAL(index - 1), rar_low); 1709 e1e_flush(); 1710 ew32(SHRAH(index - 1), rar_high); 1711 e1e_flush(); 1712 1713 e1000_release_swflag_ich8lan(hw); 1714 1715 /* verify the register updates */ 1716 if ((er32(SHRAL(index - 1)) == rar_low) && 1717 (er32(SHRAH(index - 1)) == rar_high)) 1718 return 0; 1719 1720 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", 1721 (index - 1), er32(FWSM)); 1722 } 1723 1724 out: 1725 e_dbg("Failed to write receive address at index %d\n", index); 1726 return -E1000_ERR_CONFIG; 1727 } 1728 1729 /** 1730 * e1000_rar_get_count_pch_lpt - Get the number of available SHRA 1731 * @hw: pointer to the HW structure 1732 * 1733 * Get the number of available receive registers that the Host can 1734 * program. SHRA[0-10] are the shared receive address registers 1735 * that are shared between the Host and manageability engine (ME). 1736 * ME can reserve any number of addresses and the host needs to be 1737 * able to tell how many available registers it has access to. 
1738 **/ 1739 static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw) 1740 { 1741 u32 wlock_mac; 1742 u32 num_entries; 1743 1744 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; 1745 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; 1746 1747 switch (wlock_mac) { 1748 case 0: 1749 /* All SHRA[0..10] and RAR[0] available */ 1750 num_entries = hw->mac.rar_entry_count; 1751 break; 1752 case 1: 1753 /* Only RAR[0] available */ 1754 num_entries = 1; 1755 break; 1756 default: 1757 /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */ 1758 num_entries = wlock_mac + 1; 1759 break; 1760 } 1761 1762 return num_entries; 1763 } 1764 1765 /** 1766 * e1000_rar_set_pch_lpt - Set receive address registers 1767 * @hw: pointer to the HW structure 1768 * @addr: pointer to the receive address 1769 * @index: receive address array register 1770 * 1771 * Sets the receive address register array at index to the address passed 1772 * in by addr. For LPT, RAR[0] is the base address register that is to 1773 * contain the MAC address. SHRA[0-10] are the shared receive address 1774 * registers that are shared between the Host and manageability engine (ME). 
1775 **/ 1776 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) 1777 { 1778 u32 rar_low, rar_high; 1779 u32 wlock_mac; 1780 1781 /* HW expects these in little endian so we reverse the byte order 1782 * from network order (big endian) to little endian 1783 */ 1784 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | 1785 ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); 1786 1787 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); 1788 1789 /* If MAC address zero, no need to set the AV bit */ 1790 if (rar_low || rar_high) 1791 rar_high |= E1000_RAH_AV; 1792 1793 if (index == 0) { 1794 ew32(RAL(index), rar_low); 1795 e1e_flush(); 1796 ew32(RAH(index), rar_high); 1797 e1e_flush(); 1798 return 0; 1799 } 1800 1801 /* The manageability engine (ME) can lock certain SHRAR registers that 1802 * it is using - those registers are unavailable for use. 1803 */ 1804 if (index < hw->mac.rar_entry_count) { 1805 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; 1806 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; 1807 1808 /* Check if all SHRAR registers are locked */ 1809 if (wlock_mac == 1) 1810 goto out; 1811 1812 if ((wlock_mac == 0) || (index <= wlock_mac)) { 1813 s32 ret_val; 1814 1815 ret_val = e1000_acquire_swflag_ich8lan(hw); 1816 1817 if (ret_val) 1818 goto out; 1819 1820 ew32(SHRAL_PCH_LPT(index - 1), rar_low); 1821 e1e_flush(); 1822 ew32(SHRAH_PCH_LPT(index - 1), rar_high); 1823 e1e_flush(); 1824 1825 e1000_release_swflag_ich8lan(hw); 1826 1827 /* verify the register updates */ 1828 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && 1829 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) 1830 return 0; 1831 } 1832 } 1833 1834 out: 1835 e_dbg("Failed to write receive address at index %d\n", index); 1836 return -E1000_ERR_CONFIG; 1837 } 1838 1839 /** 1840 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 1841 * @hw: pointer to the HW structure 1842 * 1843 * Checks if firmware is blocking the reset of the PHY. 
1844 * This is a function pointer entry point only called by 1845 * reset routines. 1846 **/ 1847 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) 1848 { 1849 bool blocked = false; 1850 int i = 0; 1851 1852 while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && 1853 (i++ < 10)) 1854 usleep_range(10000, 20000); 1855 return blocked ? E1000_BLK_PHY_RESET : 0; 1856 } 1857 1858 /** 1859 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states 1860 * @hw: pointer to the HW structure 1861 * 1862 * Assumes semaphore already acquired. 1863 * 1864 **/ 1865 static s32 e1000_write_smbus_addr(struct e1000_hw *hw) 1866 { 1867 u16 phy_data; 1868 u32 strap = er32(STRAP); 1869 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> 1870 E1000_STRAP_SMT_FREQ_SHIFT; 1871 s32 ret_val; 1872 1873 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 1874 1875 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); 1876 if (ret_val) 1877 return ret_val; 1878 1879 phy_data &= ~HV_SMB_ADDR_MASK; 1880 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 1881 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 1882 1883 if (hw->phy.type == e1000_phy_i217) { 1884 /* Restore SMBus frequency */ 1885 if (freq--) { 1886 phy_data &= ~HV_SMB_ADDR_FREQ_MASK; 1887 phy_data |= (freq & (1 << 0)) << 1888 HV_SMB_ADDR_FREQ_LOW_SHIFT; 1889 phy_data |= (freq & (1 << 1)) << 1890 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); 1891 } else { 1892 e_dbg("Unsupported SMB frequency in PHY\n"); 1893 } 1894 } 1895 1896 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 1897 } 1898 1899 /** 1900 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 1901 * @hw: pointer to the HW structure 1902 * 1903 * SW should configure the LCD from the NVM extended configuration region 1904 * as a workaround for certain parts. 
1905 **/ 1906 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 1907 { 1908 struct e1000_phy_info *phy = &hw->phy; 1909 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 1910 s32 ret_val = 0; 1911 u16 word_addr, reg_data, reg_addr, phy_page = 0; 1912 1913 /* Initialize the PHY from the NVM on ICH platforms. This 1914 * is needed due to an issue where the NVM configuration is 1915 * not properly autoloaded after power transitions. 1916 * Therefore, after each PHY reset, we will load the 1917 * configuration data out of the NVM manually. 1918 */ 1919 switch (hw->mac.type) { 1920 case e1000_ich8lan: 1921 if (phy->type != e1000_phy_igp_3) 1922 return ret_val; 1923 1924 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || 1925 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { 1926 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 1927 break; 1928 } 1929 /* Fall-thru */ 1930 case e1000_pchlan: 1931 case e1000_pch2lan: 1932 case e1000_pch_lpt: 1933 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 1934 break; 1935 default: 1936 return ret_val; 1937 } 1938 1939 ret_val = hw->phy.ops.acquire(hw); 1940 if (ret_val) 1941 return ret_val; 1942 1943 data = er32(FEXTNVM); 1944 if (!(data & sw_cfg_mask)) 1945 goto release; 1946 1947 /* Make sure HW does not configure LCD from PHY 1948 * extended configuration before SW configuration 1949 */ 1950 data = er32(EXTCNF_CTRL); 1951 if ((hw->mac.type < e1000_pch2lan) && 1952 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) 1953 goto release; 1954 1955 cnf_size = er32(EXTCNF_SIZE); 1956 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 1957 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; 1958 if (!cnf_size) 1959 goto release; 1960 1961 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1963 1964 if (((hw->mac.type == e1000_pchlan) && 1965 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || 1966 (hw->mac.type > e1000_pchlan)) { 1967 /* HW configures 
the SMBus address and LEDs when the 1968 * OEM and LCD Write Enable bits are set in the NVM. 1969 * When both NVM bits are cleared, SW will configure 1970 * them instead. 1971 */ 1972 ret_val = e1000_write_smbus_addr(hw); 1973 if (ret_val) 1974 goto release; 1975 1976 data = er32(LEDCTL); 1977 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, 1978 (u16)data); 1979 if (ret_val) 1980 goto release; 1981 } 1982 1983 /* Configure LCD from extended configuration region. */ 1984 1985 /* cnf_base_addr is in DWORD */ 1986 word_addr = (u16)(cnf_base_addr << 1); 1987 1988 for (i = 0; i < cnf_size; i++) { 1989 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, ®_data); 1990 if (ret_val) 1991 goto release; 1992 1993 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), 1994 1, ®_addr); 1995 if (ret_val) 1996 goto release; 1997 1998 /* Save off the PHY page for future writes. */ 1999 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { 2000 phy_page = reg_data; 2001 continue; 2002 } 2003 2004 reg_addr &= PHY_REG_MASK; 2005 reg_addr |= phy_page; 2006 2007 ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); 2008 if (ret_val) 2009 goto release; 2010 } 2011 2012 release: 2013 hw->phy.ops.release(hw); 2014 return ret_val; 2015 } 2016 2017 /** 2018 * e1000_k1_gig_workaround_hv - K1 Si workaround 2019 * @hw: pointer to the HW structure 2020 * @link: link up bool flag 2021 * 2022 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning 2023 * from a lower speed. This workaround disables K1 whenever link is at 1Gig 2024 * If link is down, the function will restore the default K1 setting located 2025 * in the NVM. 
2026 **/ 2027 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) 2028 { 2029 s32 ret_val = 0; 2030 u16 status_reg = 0; 2031 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; 2032 2033 if (hw->mac.type != e1000_pchlan) 2034 return 0; 2035 2036 /* Wrap the whole flow with the sw flag */ 2037 ret_val = hw->phy.ops.acquire(hw); 2038 if (ret_val) 2039 return ret_val; 2040 2041 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ 2042 if (link) { 2043 if (hw->phy.type == e1000_phy_82578) { 2044 ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, 2045 &status_reg); 2046 if (ret_val) 2047 goto release; 2048 2049 status_reg &= (BM_CS_STATUS_LINK_UP | 2050 BM_CS_STATUS_RESOLVED | 2051 BM_CS_STATUS_SPEED_MASK); 2052 2053 if (status_reg == (BM_CS_STATUS_LINK_UP | 2054 BM_CS_STATUS_RESOLVED | 2055 BM_CS_STATUS_SPEED_1000)) 2056 k1_enable = false; 2057 } 2058 2059 if (hw->phy.type == e1000_phy_82577) { 2060 ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); 2061 if (ret_val) 2062 goto release; 2063 2064 status_reg &= (HV_M_STATUS_LINK_UP | 2065 HV_M_STATUS_AUTONEG_COMPLETE | 2066 HV_M_STATUS_SPEED_MASK); 2067 2068 if (status_reg == (HV_M_STATUS_LINK_UP | 2069 HV_M_STATUS_AUTONEG_COMPLETE | 2070 HV_M_STATUS_SPEED_1000)) 2071 k1_enable = false; 2072 } 2073 2074 /* Link stall fix for link up */ 2075 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); 2076 if (ret_val) 2077 goto release; 2078 2079 } else { 2080 /* Link stall fix for link down */ 2081 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); 2082 if (ret_val) 2083 goto release; 2084 } 2085 2086 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); 2087 2088 release: 2089 hw->phy.ops.release(hw); 2090 2091 return ret_val; 2092 } 2093 2094 /** 2095 * e1000_configure_k1_ich8lan - Configure K1 power state 2096 * @hw: pointer to the HW structure 2097 * @enable: K1 state to configure 2098 * 2099 * Configure the K1 power state based on the provided parameter. 
 * Assumes semaphore already acquired.
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	/* Read-modify-write the K1 enable bit in the KMRN K1 config reg */
	ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	usleep_range(20, 40);
	/* Save current CTRL/CTRL_EXT so they can be restored below */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_reg = er32(CTRL);

	/* Briefly force speed (with speed-select bypass) so the new K1
	 * setting takes effect, then restore the original register values.
	 */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	ew32(CTRL, reg);

	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	e1e_flush();
	usleep_range(20, 40);
	ew32(CTRL, ctrl_reg);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
	usleep_range(20, 40);

	return 0;
}

/**
 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 * @d0_state: boolean if entering d0 or d3 device state
 *
 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
 * in NVM determines whether HW should configure LPLU and Gbe Disable.
 **/
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	/* OEM bits only exist on PCH and newer MACs */
	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On PCH, HW configures the OEM bits when OEM write enable is set */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = er32(EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* SW config must be allowed by the NVM */
	mac_reg = er32(FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	mac_reg = er32(PHY_CTRL);

	ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	/* Mirror the MAC PHY_CTRL policy bits into the PHY OEM bits,
	 * using the D0 or non-D0 variants depending on target state.
	 */
	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
			       E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
 * @hw: pointer to the HW structure
 **/
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;

	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= HV_KMRN_MDIO_SLOW;

	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);

	return ret_val;
}

/**
 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 * @hw: pointer to the HW structure
 **/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 phy_data;

	if (hw->mac.type != e1000_pchlan)
		return 0;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
	}

	/* Early PHY revisions only (82577 rev 1/2, 82578 rev 1) */
	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/* Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000e_phy_sw_reset(hw);
			ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	hw->phy.addr = 1;
	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		return ret_val;

	/* Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, true);
	if (ret_val)
		return ret_val;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
	if (ret_val)
		goto release;

	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 * @hw: pointer to the HW structure
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	/* Open access to the BM wakeup register space; phy_reg saves the
	 * original page-select state for the matching disable call below.
	 */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		mac_reg = er32(RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		mac_reg = er32(RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}

/**
 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 * @hw: pointer to the HW structure
 * @enable: flag to
enable/disable workaround when enabling/disabling jumbos
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = 0;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	/* Workaround applies to 82579 (pch2lan) and newer only */
	if (hw->mac.type < e1000_pch2lan)
		return 0;

	/* disable Rx path while enabling/disabling workaround */
	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ALEN] = { 0 };
			u32 addr_high, addr_low;

			addr_high = er32(RAH(i));
			/* Skip entries not marked Address Valid */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = er32(RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		ew32(FFLT_DBG, mac_reg);

		/* Strip the CRC in the MAC since the PHY re-inserts it */
		mac_reg = er32(RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x1A << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		ew32(FFLT_DBG, mac_reg);

		mac_reg = er32(RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
}

/**
 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 * @hw: pointer to the HW structure
 **/
static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	if (hw->mac.type != e1000_pch2lan)
		return 0;

	/* Set MDIO slow mode before any other MDIO access */
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
	if (ret_val)
		goto release;
	/* drop link after 5 times MSE threshold was reached */
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_k1_workaround_lv - K1 Si workaround
 * @hw: pointer to the HW structure
 *
 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
 * Disable K1 in 1000Mbps and 100Mbps
 **/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 status_reg = 0;

	if (hw->mac.type != e1000_pch2lan)
		return 0;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	/* Only act once link is up and auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa */
			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
			if (ret_val)
				return ret_val;
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			u32 mac_reg;

			/* 10Mbps: widen the K1 beacon duration instead */
			mac_reg = er32(FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			ew32(FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}

/**
 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
 * @hw: pointer to the HW structure
 * @gate: boolean set to true to gate, false to ungate
 *
 * Gate/ungate the automatic PHY configuration via hardware; perform
 * the configuration via software instead.
 **/
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
{
	u32 extcnf_ctrl;

	if (hw->mac.type < e1000_pch2lan)
		return;

	extcnf_ctrl = er32(EXTCNF_CTRL);

	if (gate)
		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
	else
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;

	ew32(EXTCNF_CTRL, extcnf_ctrl);
}

/**
 * e1000_lan_init_done_ich8lan - Check for PHY config completion
 * @hw: pointer to the HW structure
 *
 * Check the appropriate indication the MAC has finished configuring the
 * PHY after a software reset.
2631 **/ 2632 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) 2633 { 2634 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; 2635 2636 /* Wait for basic configuration completes before proceeding */ 2637 do { 2638 data = er32(STATUS); 2639 data &= E1000_STATUS_LAN_INIT_DONE; 2640 usleep_range(100, 200); 2641 } while ((!data) && --loop); 2642 2643 /* If basic configuration is incomplete before the above loop 2644 * count reaches 0, loading the configuration from NVM will 2645 * leave the PHY in a bad state possibly resulting in no link. 2646 */ 2647 if (loop == 0) 2648 e_dbg("LAN_INIT_DONE not set, increase timeout\n"); 2649 2650 /* Clear the Init Done bit for the next init event */ 2651 data = er32(STATUS); 2652 data &= ~E1000_STATUS_LAN_INIT_DONE; 2653 ew32(STATUS, data); 2654 } 2655 2656 /** 2657 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset 2658 * @hw: pointer to the HW structure 2659 **/ 2660 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) 2661 { 2662 s32 ret_val = 0; 2663 u16 reg; 2664 2665 if (hw->phy.ops.check_reset_block(hw)) 2666 return 0; 2667 2668 /* Allow time for h/w to get to quiescent state after reset */ 2669 usleep_range(10000, 20000); 2670 2671 /* Perform any necessary post-reset workarounds */ 2672 switch (hw->mac.type) { 2673 case e1000_pchlan: 2674 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 2675 if (ret_val) 2676 return ret_val; 2677 break; 2678 case e1000_pch2lan: 2679 ret_val = e1000_lv_phy_workarounds_ich8lan(hw); 2680 if (ret_val) 2681 return ret_val; 2682 break; 2683 default: 2684 break; 2685 } 2686 2687 /* Clear the host wakeup bit after lcd reset */ 2688 if (hw->mac.type >= e1000_pchlan) { 2689 e1e_rphy(hw, BM_PORT_GEN_CFG, ®); 2690 reg &= ~BM_WUC_HOST_WU_BIT; 2691 e1e_wphy(hw, BM_PORT_GEN_CFG, reg); 2692 } 2693 2694 /* Configure the LCD with the extended configuration region in NVM */ 2695 ret_val = e1000_sw_lcd_config_ich8lan(hw); 2696 if (ret_val) 2697 return ret_val; 2698 2699 /* 
Configure the LCD with the OEM bits in NVM */ 2700 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 2701 2702 if (hw->mac.type == e1000_pch2lan) { 2703 /* Ungate automatic PHY configuration on non-managed 82579 */ 2704 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 2705 usleep_range(10000, 20000); 2706 e1000_gate_hw_phy_config_ich8lan(hw, false); 2707 } 2708 2709 /* Set EEE LPI Update Timer to 200usec */ 2710 ret_val = hw->phy.ops.acquire(hw); 2711 if (ret_val) 2712 return ret_val; 2713 ret_val = e1000_write_emi_reg_locked(hw, 2714 I82579_LPI_UPDATE_TIMER, 2715 0x1387); 2716 hw->phy.ops.release(hw); 2717 } 2718 2719 return ret_val; 2720 } 2721 2722 /** 2723 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset 2724 * @hw: pointer to the HW structure 2725 * 2726 * Resets the PHY 2727 * This is a function pointer entry point called by drivers 2728 * or other shared routines. 2729 **/ 2730 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 2731 { 2732 s32 ret_val = 0; 2733 2734 /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 2735 if ((hw->mac.type == e1000_pch2lan) && 2736 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 2737 e1000_gate_hw_phy_config_ich8lan(hw, true); 2738 2739 ret_val = e1000e_phy_hw_reset_generic(hw); 2740 if (ret_val) 2741 return ret_val; 2742 2743 return e1000_post_phy_reset_ich8lan(hw); 2744 } 2745 2746 /** 2747 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state 2748 * @hw: pointer to the HW structure 2749 * @active: true to enable LPLU, false to disable 2750 * 2751 * Sets the LPLU state according to the active flag. For PCH, if OEM write 2752 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set 2753 * the phy speed. This function will manually set the LPLU bit and restart 2754 * auto-neg as hw would do. D3 and D0 LPLU will call the same function 2755 * since it configures the same bit. 
2756 **/ 2757 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) 2758 { 2759 s32 ret_val; 2760 u16 oem_reg; 2761 2762 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); 2763 if (ret_val) 2764 return ret_val; 2765 2766 if (active) 2767 oem_reg |= HV_OEM_BITS_LPLU; 2768 else 2769 oem_reg &= ~HV_OEM_BITS_LPLU; 2770 2771 if (!hw->phy.ops.check_reset_block(hw)) 2772 oem_reg |= HV_OEM_BITS_RESTART_AN; 2773 2774 return e1e_wphy(hw, HV_OEM_BITS, oem_reg); 2775 } 2776 2777 /** 2778 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state 2779 * @hw: pointer to the HW structure 2780 * @active: true to enable LPLU, false to disable 2781 * 2782 * Sets the LPLU D0 state according to the active flag. When 2783 * activating LPLU this function also disables smart speed 2784 * and vice versa. LPLU will not be activated unless the 2785 * device autonegotiation advertisement meets standards of 2786 * either 10 or 10/100 or 10/100/1000 at all duplexes. 2787 * This is a function pointer entry point only called by 2788 * PHY setup routines. 
 **/
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	/* The IFE PHY has no D0 LPLU support here */
	if (phy->type == e1000_phy_ife)
		return 0;

	phy_ctrl = er32(PHY_CTRL);

	if (active) {
		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
		if (ret_val)
			return ret_val;
	} else {
		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	}

	return 0;
}

/**
 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D3 state according to the active flag. When
 * activating LPLU this function also disables smart speed
 * and vice versa. LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	phy_ctrl = er32(PHY_CTRL);

	if (!active) {
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}

/**
 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 * @hw: pointer to the HW structure
 * @bank: pointer to the variable that returns the active bank
 *
 * Reads signature byte from the NVM using the flash access registers.
 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Byte offset of bank 1 relative to bank 0 */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* High byte of the signature word */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val;

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* ICH8/9 can report the valid bank directly via EECD */
		eecd = er32(EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return 0;
		}
		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}

		e_dbg("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}

/**
 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the word(s) to read.
 * @words: Size of data to read in words
 * @data: Pointer to the word(s) to read at offset.
 *
 * Reads a word(s) from the NVM using the flash access registers.
 **/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = 0;
	u32 bank = 0;
	u16 i, word;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	/* Translate the word offset into the active bank */
	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	ret_val = 0;
	for (i = 0; i < words; i++) {
		/* Prefer locally-modified shadow RAM over flash contents */
		if (dev_spec->shadow_ram[offset + i].modified) {
			data[i] = dev_spec->shadow_ram[offset + i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		e_dbg("NVM read error: %d\n", ret_val);

	return ret_val;
}

/**
 * e1000_flash_cycle_init_ich8lan - Initialize flash
 * @hw: pointer to the HW structure
 *
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = 0;
				break;
			}
			udelay(1);
		}
		if (!ret_val) {
			/* The previous cycle ended within the timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			e_dbg("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}

/**
 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 * @hw: pointer to the HW structure
 * @timeout: maximum time to wait for completion
 *
 * This function starts a flash cycle and waits for its completion.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;
	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone)
			break;
		udelay(1);
	} while (i++ < timeout);

	/* Success only when the cycle completed without a cycle error */
	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
		return 0;

	return -E1000_ERR_NVM;
}

/**
 * e1000_read_flash_word_ich8lan - Read word from flash
 * @hw: pointer to the HW structure
 * @offset: offset to data location
 * @data: pointer to the location for storing the data
 *
 * Reads the flash word at offset into data.  Offset is converted
 * to bytes before read.
 **/
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data)
{
	/* Must convert offset into bytes. */
	offset <<= 1;

	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
}

/**
 * e1000_read_flash_byte_ich8lan - Read byte from flash
 * @hw: pointer to the HW structure
 * @offset: The offset of the byte to read.
 * @data: Pointer to a byte to store the value read.
 *
 * Reads a single byte from the NVM using the flash access registers.
3198 **/ 3199 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 3200 u8 *data) 3201 { 3202 s32 ret_val; 3203 u16 word = 0; 3204 3205 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); 3206 if (ret_val) 3207 return ret_val; 3208 3209 *data = (u8)word; 3210 3211 return 0; 3212 } 3213 3214 /** 3215 * e1000_read_flash_data_ich8lan - Read byte or word from NVM 3216 * @hw: pointer to the HW structure 3217 * @offset: The offset (in bytes) of the byte or word to read. 3218 * @size: Size of data to read, 1=byte 2=word 3219 * @data: Pointer to the word to store the value read. 3220 * 3221 * Reads a byte or word from the NVM using the flash access registers. 3222 **/ 3223 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 3224 u8 size, u16 *data) 3225 { 3226 union ich8_hws_flash_status hsfsts; 3227 union ich8_hws_flash_ctrl hsflctl; 3228 u32 flash_linear_addr; 3229 u32 flash_data = 0; 3230 s32 ret_val = -E1000_ERR_NVM; 3231 u8 count = 0; 3232 3233 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 3234 return -E1000_ERR_NVM; 3235 3236 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 3237 hw->nvm.flash_base_addr); 3238 3239 do { 3240 udelay(1); 3241 /* Steps */ 3242 ret_val = e1000_flash_cycle_init_ich8lan(hw); 3243 if (ret_val) 3244 break; 3245 3246 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 3247 /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
*/ 3248 hsflctl.hsf_ctrl.fldbcount = size - 1; 3249 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; 3250 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 3251 3252 ew32flash(ICH_FLASH_FADDR, flash_linear_addr); 3253 3254 ret_val = 3255 e1000_flash_cycle_ich8lan(hw, 3256 ICH_FLASH_READ_COMMAND_TIMEOUT); 3257 3258 /* Check if FCERR is set to 1, if set to 1, clear it 3259 * and try the whole sequence a few more times, else 3260 * read in (shift in) the Flash Data0, the order is 3261 * least significant byte first msb to lsb 3262 */ 3263 if (!ret_val) { 3264 flash_data = er32flash(ICH_FLASH_FDATA0); 3265 if (size == 1) 3266 *data = (u8)(flash_data & 0x000000FF); 3267 else if (size == 2) 3268 *data = (u16)(flash_data & 0x0000FFFF); 3269 break; 3270 } else { 3271 /* If we've gotten here, then things are probably 3272 * completely hosed, but if the error condition is 3273 * detected, it won't hurt to give it another try... 3274 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 3275 */ 3276 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3277 if (hsfsts.hsf_status.flcerr) { 3278 /* Repeat for some time before giving up. */ 3279 continue; 3280 } else if (!hsfsts.hsf_status.flcdone) { 3281 e_dbg("Timeout error - flash cycle did not complete.\n"); 3282 break; 3283 } 3284 } 3285 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 3286 3287 return ret_val; 3288 } 3289 3290 /** 3291 * e1000_write_nvm_ich8lan - Write word(s) to the NVM 3292 * @hw: pointer to the HW structure 3293 * @offset: The offset (in bytes) of the word(s) to write. 3294 * @words: Size of data to write in words 3295 * @data: Pointer to the word(s) to write at offset. 3296 * 3297 * Writes a byte or word to the NVM using the flash access registers. 
 **/
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 i;

	/* Reject out-of-range and zero-length requests. */
	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	nvm->ops.acquire(hw);

	/* Writes only land in the shadow RAM; they are committed to the
	 * flash later by e1000_update_nvm_checksum_ich8lan().
	 */
	for (i = 0; i < words; i++) {
		dev_spec->shadow_ram[offset + i].modified = true;
		dev_spec->shadow_ram[offset + i].value = data[i];
	}

	nvm->ops.release(hw);

	return 0;
}

/**
 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 * @hw: pointer to the HW structure
 *
 * The NVM checksum is updated by calling the generic update_nvm_checksum,
 * which writes the checksum to the shadow ram.  The changes in the shadow
 * ram are then committed to the EEPROM by processing each bank at a time
 * checking for the modified bit and writing only the pending changes.
 * After a successful commit, the shadow ram is cleared and is ready for
 * future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* Only the software-flash NVM type uses the shadow-RAM commit
	 * path below; everything else is done after the generic update.
	 */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/* We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}

	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		/* Determine whether to write the value stored
		 * in the other NVM bank or a modified value stored
		 * in the shadow RAM
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/* If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert word offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usleep_range(100, 200);
		/* Write the low then the high byte to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		usleep_range(100, 200);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset + 1,
							       (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/* Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
		e_dbg("Flash commit failed.\n");
		goto release;
	}

	/* Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13, this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	data &= 0xBFFF;
	/* Only the high byte holds the signature bits, so write just it. */
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/* And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b.  This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's.  We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/* Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		usleep_range(10000, 20000);
	}

out:
	if (ret_val)
		e_dbg("NVM update error: %d\n", ret_val);

	return ret_val;
}

/**
 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
 * If the bit is 0, the EEPROM had been modified, but the checksum was not
 * calculated, in which case we need to calculate the checksum and set bit 6.
 **/
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;
	u16 word;
	u16 valid_csum_mask;

	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
	 * the checksum needs to be fixed.  This bit is an indication that
	 * the NVM was prepared by OEM software and did not calculate
	 * the checksum...a likely scenario.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		word = NVM_COMPAT;
		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
		break;
	default:
		word = NVM_FUTURE_INIT_WORD1;
		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
		break;
	}

	ret_val = e1000_read_nvm(hw, word, 1, &data);
	if (ret_val)
		return ret_val;

	/* If the valid-checksum bit is clear, set it, write the word back
	 * and recompute the checksum before validating.
	 */
	if (!(data & valid_csum_mask)) {
		data |= valid_csum_mask;
		ret_val = e1000_write_nvm(hw, word, 1, &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_update_nvm_checksum(hw);
		if (ret_val)
			return ret_val;
	}

	return e1000e_validate_nvm_checksum_generic(hw);
}

/**
 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
 * @hw: pointer to the HW structure
 *
 * To prevent malicious write/erase of the NVM, set it to be read-only
 * so that the hardware ignores all write/erase cycles of the NVM via
 * the flash control registers.
 * The shadow-ram copy of the NVM will
 * still be updated, however any updates to this copy will not stick
 * across driver reloads.
 **/
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_flash_protected_range pr0;
	union ich8_hws_flash_status hsfsts;
	u32 gfpreg;

	nvm->ops.acquire(hw);

	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* Write-protect GbE Sector of NVM: program PR0 with the GbE
	 * region base/limit taken from GFPREG and enable write protect.
	 */
	pr0.regval = er32flash(ICH_FLASH_PR0);
	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
	pr0.range.wpe = true;
	ew32flash(ICH_FLASH_PR0, pr0.regval);

	/* Lock down a subset of GbE Flash Control Registers, e.g.
	 * PR0 to prevent the write-protection from being lifted.
	 * Once FLOCKDN is set, the registers protected by it cannot
	 * be written until FLOCKDN is cleared by a hardware reset.
	 */
	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
	hsfsts.hsf_status.flockdn = true;
	/* NOTE(review): HSFSTS is read with a 16-bit access above but
	 * written back with a 32-bit access here, unlike the ew16flash()
	 * writes used for this register elsewhere in this file -- confirm
	 * the 32-bit write is intentional.
	 */
	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	nvm->ops.release(hw);
}

/**
 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the byte/word to write.
 * @size: Size of data to write, 1=byte 2=word
 * @data: The byte(s) to write to the NVM.
 *
 * Writes one/two bytes to the NVM using the flash access registers.
 * Retries the whole cycle up to ICH_FLASH_CYCLE_REPEAT_COUNT times on
 * a flash cycle error (FCERR).
 **/
static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val;
	u8 count = 0;

	/* Reject unsupported sizes, data wider than 'size' allows, and
	 * offsets outside the linear address range.
	 */
	if (size < 1 || size > 2 || data > size * 0xff ||
	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		ew32flash(ICH_FLASH_FDATA0, flash_data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
		if (!ret_val)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			e_dbg("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

/**
 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The index of the byte to write.
 * @data: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 data)
{
	u16 word = (u16)data;

	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}

/**
 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The offset of the byte to write.
 * @byte: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 * Goes through a retry algorithm (one initial attempt plus up to 100
 * retries, 100-200 usec apart) before giving up.
 **/
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte)
{
	s32 ret_val;
	u16 program_retries;

	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
	if (!ret_val)
		return ret_val;

	for (program_retries = 0; program_retries < 100; program_retries++) {
		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
		usleep_range(100, 200);
		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
		if (!ret_val)
			break;
	}
	if (program_retries == 100)
		return -E1000_ERR_NVM;

	return 0;
}

/**
 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
 * @hw: pointer to the HW structure
 * @bank: 0 for first bank, 1 for second bank, etc.
 *
 * Erases the bank specified.  Each bank is a 4k block.  Banks are 0 based.
 * bank N is 4096 * N + flash_reg_addr.
 **/
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	/* bank size is in 16bit words - adjust to bytes */
	u32 flash_bank_size = nvm->flash_bank_size * 2;
	s32 ret_val;
	s32 count = 0;
	s32 j, iteration, sector_size;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Determine HW Sector size: Read BERASE bits of hw flash status
	 * register
	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
	 *     consecutive sectors.  The start index for the nth Hw sector
	 *     can be calculated as = bank * 4096 + n * 256
	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
	 *     The start index for the nth Hw sector can be calculated
	 *     as = bank * 4096
	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
	 *     (ich9 only, otherwise error condition)
	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
	 */
	switch (hsfsts.hsf_status.berasesz) {
	case 0:
		/* Hw sector size 256 */
		sector_size = ICH_FLASH_SEG_SIZE_256;
		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
		break;
	case 1:
		sector_size = ICH_FLASH_SEG_SIZE_4K;
		iteration = 1;
		break;
	case 2:
		sector_size = ICH_FLASH_SEG_SIZE_8K;
		iteration = 1;
		break;
	case 3:
		sector_size = ICH_FLASH_SEG_SIZE_64K;
		iteration = 1;
		break;
	default:
		return -E1000_ERR_NVM;
	}

	/* Start with the base address, then add the sector offset. */
	flash_linear_addr = hw->nvm.flash_base_addr;
	flash_linear_addr += (bank) ? flash_bank_size : 0;

	/* NOTE(review): 'count' is the FCERR retry budget and is shared
	 * across all 'iteration' sectors rather than reset per sector --
	 * confirm that is the intended retry policy.
	 */
	for (j = 0; j < iteration; j++) {
		do {
			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;

			/* Steps */
			ret_val = e1000_flash_cycle_init_ich8lan(hw);
			if (ret_val)
				return ret_val;

			/* Write a value 11 (block Erase) in Flash
			 * Cycle field in hw flash control
			 */
			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

			/* Write the last 24 bits of an index within the
			 * block into Flash Linear address field in Flash
			 * Address.
			 */
			/* NOTE(review): this increment sits inside the retry
			 * loop and accumulates across both retries and 'j'
			 * iterations, so for multi-sector banks (256-byte
			 * sectors) or after an FCERR 'continue' the address
			 * drifts beyond base + j * sector_size -- verify
			 * against the intended per-sector addressing (the
			 * common 4K-sector case has iteration == 1).
			 */
			flash_linear_addr += (j * sector_size);
			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
			if (!ret_val)
				break;

			/* Check if FCERR is set to 1.  If 1,
			 * clear it and try the whole sequence
			 * a few more times else Done
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr)
				/* repeat for some time before giving up */
				continue;
			else if (!hsfsts.hsf_status.flcdone)
				return ret_val;
		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
	}

	return 0;
}

/**
 * e1000_valid_led_default_ich8lan - Set the default LED settings
 * @hw: pointer to the HW structure
 * @data: Pointer to the LED settings
 *
 * Reads the LED default settings from the NVM to data.  If the NVM LED
 * settings is all 0's or F's, set the LED default to a valid LED default
 * setting.
3802 **/ 3803 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) 3804 { 3805 s32 ret_val; 3806 3807 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); 3808 if (ret_val) { 3809 e_dbg("NVM Read Error\n"); 3810 return ret_val; 3811 } 3812 3813 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) 3814 *data = ID_LED_DEFAULT_ICH8LAN; 3815 3816 return 0; 3817 } 3818 3819 /** 3820 * e1000_id_led_init_pchlan - store LED configurations 3821 * @hw: pointer to the HW structure 3822 * 3823 * PCH does not control LEDs via the LEDCTL register, rather it uses 3824 * the PHY LED configuration register. 3825 * 3826 * PCH also does not have an "always on" or "always off" mode which 3827 * complicates the ID feature. Instead of using the "on" mode to indicate 3828 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()), 3829 * use "link_up" mode. The LEDs will still ID on request if there is no 3830 * link based on logic in e1000_led_[on|off]_pchlan(). 
3831 **/ 3832 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) 3833 { 3834 struct e1000_mac_info *mac = &hw->mac; 3835 s32 ret_val; 3836 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; 3837 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; 3838 u16 data, i, temp, shift; 3839 3840 /* Get default ID LED modes */ 3841 ret_val = hw->nvm.ops.valid_led_default(hw, &data); 3842 if (ret_val) 3843 return ret_val; 3844 3845 mac->ledctl_default = er32(LEDCTL); 3846 mac->ledctl_mode1 = mac->ledctl_default; 3847 mac->ledctl_mode2 = mac->ledctl_default; 3848 3849 for (i = 0; i < 4; i++) { 3850 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; 3851 shift = (i * 5); 3852 switch (temp) { 3853 case ID_LED_ON1_DEF2: 3854 case ID_LED_ON1_ON2: 3855 case ID_LED_ON1_OFF2: 3856 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 3857 mac->ledctl_mode1 |= (ledctl_on << shift); 3858 break; 3859 case ID_LED_OFF1_DEF2: 3860 case ID_LED_OFF1_ON2: 3861 case ID_LED_OFF1_OFF2: 3862 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 3863 mac->ledctl_mode1 |= (ledctl_off << shift); 3864 break; 3865 default: 3866 /* Do nothing */ 3867 break; 3868 } 3869 switch (temp) { 3870 case ID_LED_DEF1_ON2: 3871 case ID_LED_ON1_ON2: 3872 case ID_LED_OFF1_ON2: 3873 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 3874 mac->ledctl_mode2 |= (ledctl_on << shift); 3875 break; 3876 case ID_LED_DEF1_OFF2: 3877 case ID_LED_ON1_OFF2: 3878 case ID_LED_OFF1_OFF2: 3879 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 3880 mac->ledctl_mode2 |= (ledctl_off << shift); 3881 break; 3882 default: 3883 /* Do nothing */ 3884 break; 3885 } 3886 } 3887 3888 return 0; 3889 } 3890 3891 /** 3892 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width 3893 * @hw: pointer to the HW structure 3894 * 3895 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability 3896 * register, so the the bus width is hard coded. 
 **/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;

	ret_val = e1000e_get_bus_info_pcie(hw);

	/* ICH devices are "PCI Express"-ish.  They have
	 * a configuration space, but do not contain
	 * PCI Express Capability registers, so bus width
	 * must be hardcoded.
	 */
	if (bus->width == e1000_bus_width_unknown)
		bus->width = e1000_bus_width_pcie_x1;

	return ret_val;
}

/**
 * e1000_reset_hw_ich8lan - Reset the hardware
 * @hw: pointer to the HW structure
 *
 * Does a full reset of the hardware which includes a reset of the PHY and
 * MAC.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		e_dbg("PCI-E Master disable polling has failed.\n");

	e_dbg("Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	usleep_range(10000, 20000);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		ew32(PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		ew32(PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting so it can be restored
		 * after the PHY comes back up.
		 */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
		if (ret_val)
			return ret_val;

		if (kum_cfg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = true;
		else
			dev_spec->nvm_k1_enabled = false;
	}

	ctrl = er32(CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	e_dbg("Issuing a global reset to ich8lan\n");
	ew32(CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msleep(20);

	/* Set Phy Config Counter to 50msec */
	if (hw->mac.type == e1000_pch2lan) {
		reg = er32(FEXTNVM3);
		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		ew32(FEXTNVM3, reg);
	}

	/* The global reset cleared the swflag in hardware; drop the
	 * software side of it directly instead of the normal release.
	 */
	if (!ret_val)
		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);

	if (ctrl & E1000_CTRL_PHY_RST) {
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		ew32(CRC_OFFSET, 0x65656565);

	/* Mask and clear any pending interrupts left from before reset. */
	ew32(IMC, 0xffffffff);
	er32(ICR);

	reg = er32(KABGTXD);
	reg |= E1000_KABGTXD_BGSQLBIAS;
	ew32(KABGTXD, reg);

	return 0;
}

/**
 * e1000_init_hw_ich8lan - Initialize the hardware
 * @hw: pointer to the HW structure
 *
 * Prepares the hardware for transmit and receive by doing the following:
 *  - initialize hardware bits
 *  - initialize LED identification
 *  - setup receive address registers
 *  - setup flow control
 *  - setup transmit descriptors
 *  - clear statistics
 **/
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* An error is not fatal and we should not stop init due to this */
	if (ret_val)
		e_dbg("Error initializing identification LED\n");

	/* Setup the receive address. */
	e1000e_init_rx_addrs(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	e_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = er32(TXDCTL(0));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	ew32(TXDCTL(0), txdctl);
	txdctl = er32(TXDCTL(1));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	ew32(TXDCTL(1), txdctl);

	/* ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32)~(PCIE_NO_SNOOP_ALL);
	e1000e_set_pcie_no_snoop(hw, snoop);

	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	ew32(CTRL_EXT, ctrl_ext);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	/* setup_link()'s result is deliberately returned; an LED or PHY
	 * workaround failure above already returned early.
	 */
	return ret_val;
}

/**
 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
 * @hw: pointer to the HW structure
 *
 * Sets/Clears required hardware bits necessary for correctly setting up the
 * hardware for transmit and receive.
4124 **/ 4125 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) 4126 { 4127 u32 reg; 4128 4129 /* Extended Device Control */ 4130 reg = er32(CTRL_EXT); 4131 reg |= (1 << 22); 4132 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 4133 if (hw->mac.type >= e1000_pchlan) 4134 reg |= E1000_CTRL_EXT_PHYPDEN; 4135 ew32(CTRL_EXT, reg); 4136 4137 /* Transmit Descriptor Control 0 */ 4138 reg = er32(TXDCTL(0)); 4139 reg |= (1 << 22); 4140 ew32(TXDCTL(0), reg); 4141 4142 /* Transmit Descriptor Control 1 */ 4143 reg = er32(TXDCTL(1)); 4144 reg |= (1 << 22); 4145 ew32(TXDCTL(1), reg); 4146 4147 /* Transmit Arbitration Control 0 */ 4148 reg = er32(TARC(0)); 4149 if (hw->mac.type == e1000_ich8lan) 4150 reg |= (1 << 28) | (1 << 29); 4151 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 4152 ew32(TARC(0), reg); 4153 4154 /* Transmit Arbitration Control 1 */ 4155 reg = er32(TARC(1)); 4156 if (er32(TCTL) & E1000_TCTL_MULR) 4157 reg &= ~(1 << 28); 4158 else 4159 reg |= (1 << 28); 4160 reg |= (1 << 24) | (1 << 26) | (1 << 30); 4161 ew32(TARC(1), reg); 4162 4163 /* Device Status */ 4164 if (hw->mac.type == e1000_ich8lan) { 4165 reg = er32(STATUS); 4166 reg &= ~(1 << 31); 4167 ew32(STATUS, reg); 4168 } 4169 4170 /* work-around descriptor data corruption issue during nfs v2 udp 4171 * traffic, just disable the nfs filtering capability 4172 */ 4173 reg = er32(RFCTL); 4174 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 4175 4176 /* Disable IPv6 extension header parsing because some malformed 4177 * IPv6 headers can hang the Rx. 
4178 */ 4179 if (hw->mac.type == e1000_ich8lan) 4180 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); 4181 ew32(RFCTL, reg); 4182 4183 /* Enable ECC on Lynxpoint */ 4184 if (hw->mac.type == e1000_pch_lpt) { 4185 reg = er32(PBECCSTS); 4186 reg |= E1000_PBECCSTS_ECC_ENABLE; 4187 ew32(PBECCSTS, reg); 4188 4189 reg = er32(CTRL); 4190 reg |= E1000_CTRL_MEHE; 4191 ew32(CTRL, reg); 4192 } 4193 } 4194 4195 /** 4196 * e1000_setup_link_ich8lan - Setup flow control and link settings 4197 * @hw: pointer to the HW structure 4198 * 4199 * Determines which flow control settings to use, then configures flow 4200 * control. Calls the appropriate media-specific link configuration 4201 * function. Assuming the adapter has a valid link partner, a valid link 4202 * should be established. Assumes the hardware has previously been reset 4203 * and the transmitter and receiver are not enabled. 4204 **/ 4205 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) 4206 { 4207 s32 ret_val; 4208 4209 if (hw->phy.ops.check_reset_block(hw)) 4210 return 0; 4211 4212 /* ICH parts do not have a word in the NVM to determine 4213 * the default flow control setting, so we explicitly 4214 * set it to full. 4215 */ 4216 if (hw->fc.requested_mode == e1000_fc_default) { 4217 /* Workaround h/w hang when Tx flow control enabled */ 4218 if (hw->mac.type == e1000_pchlan) 4219 hw->fc.requested_mode = e1000_fc_rx_pause; 4220 else 4221 hw->fc.requested_mode = e1000_fc_full; 4222 } 4223 4224 /* Save off the requested flow control mode for use later. Depending 4225 * on the link partner's capabilities, we may or may not use this mode. 4226 */ 4227 hw->fc.current_mode = hw->fc.requested_mode; 4228 4229 e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); 4230 4231 /* Continue to configure the copper link. 
*/ 4232 ret_val = hw->mac.ops.setup_physical_interface(hw); 4233 if (ret_val) 4234 return ret_val; 4235 4236 ew32(FCTTV, hw->fc.pause_time); 4237 if ((hw->phy.type == e1000_phy_82578) || 4238 (hw->phy.type == e1000_phy_82579) || 4239 (hw->phy.type == e1000_phy_i217) || 4240 (hw->phy.type == e1000_phy_82577)) { 4241 ew32(FCRTV_PCH, hw->fc.refresh_time); 4242 4243 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), 4244 hw->fc.pause_time); 4245 if (ret_val) 4246 return ret_val; 4247 } 4248 4249 return e1000e_set_fc_watermarks(hw); 4250 } 4251 4252 /** 4253 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface 4254 * @hw: pointer to the HW structure 4255 * 4256 * Configures the kumeran interface to the PHY to wait the appropriate time 4257 * when polling the PHY, then call the generic setup_copper_link to finish 4258 * configuring the copper link. 4259 **/ 4260 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) 4261 { 4262 u32 ctrl; 4263 s32 ret_val; 4264 u16 reg_data; 4265 4266 ctrl = er32(CTRL); 4267 ctrl |= E1000_CTRL_SLU; 4268 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 4269 ew32(CTRL, ctrl); 4270 4271 /* Set the mac to wait the maximum time between each iteration 4272 * and increase the max iterations when polling the phy; 4273 * this fixes erroneous timeouts at 10Mbps. 
4274 */ 4275 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); 4276 if (ret_val) 4277 return ret_val; 4278 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 4279 ®_data); 4280 if (ret_val) 4281 return ret_val; 4282 reg_data |= 0x3F; 4283 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 4284 reg_data); 4285 if (ret_val) 4286 return ret_val; 4287 4288 switch (hw->phy.type) { 4289 case e1000_phy_igp_3: 4290 ret_val = e1000e_copper_link_setup_igp(hw); 4291 if (ret_val) 4292 return ret_val; 4293 break; 4294 case e1000_phy_bm: 4295 case e1000_phy_82578: 4296 ret_val = e1000e_copper_link_setup_m88(hw); 4297 if (ret_val) 4298 return ret_val; 4299 break; 4300 case e1000_phy_82577: 4301 case e1000_phy_82579: 4302 ret_val = e1000_copper_link_setup_82577(hw); 4303 if (ret_val) 4304 return ret_val; 4305 break; 4306 case e1000_phy_ife: 4307 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); 4308 if (ret_val) 4309 return ret_val; 4310 4311 reg_data &= ~IFE_PMC_AUTO_MDIX; 4312 4313 switch (hw->phy.mdix) { 4314 case 1: 4315 reg_data &= ~IFE_PMC_FORCE_MDIX; 4316 break; 4317 case 2: 4318 reg_data |= IFE_PMC_FORCE_MDIX; 4319 break; 4320 case 0: 4321 default: 4322 reg_data |= IFE_PMC_AUTO_MDIX; 4323 break; 4324 } 4325 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); 4326 if (ret_val) 4327 return ret_val; 4328 break; 4329 default: 4330 break; 4331 } 4332 4333 return e1000e_setup_copper_link(hw); 4334 } 4335 4336 /** 4337 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface 4338 * @hw: pointer to the HW structure 4339 * 4340 * Calls the PHY specific link setup function and then calls the 4341 * generic setup_copper_link to finish configuring the link for 4342 * Lynxpoint PCH devices 4343 **/ 4344 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) 4345 { 4346 u32 ctrl; 4347 s32 ret_val; 4348 4349 ctrl = er32(CTRL); 4350 ctrl |= E1000_CTRL_SLU; 4351 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 
4352 ew32(CTRL, ctrl); 4353 4354 ret_val = e1000_copper_link_setup_82577(hw); 4355 if (ret_val) 4356 return ret_val; 4357 4358 return e1000e_setup_copper_link(hw); 4359 } 4360 4361 /** 4362 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex 4363 * @hw: pointer to the HW structure 4364 * @speed: pointer to store current link speed 4365 * @duplex: pointer to store the current link duplex 4366 * 4367 * Calls the generic get_speed_and_duplex to retrieve the current link 4368 * information and then calls the Kumeran lock loss workaround for links at 4369 * gigabit speeds. 4370 **/ 4371 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, 4372 u16 *duplex) 4373 { 4374 s32 ret_val; 4375 4376 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); 4377 if (ret_val) 4378 return ret_val; 4379 4380 if ((hw->mac.type == e1000_ich8lan) && 4381 (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { 4382 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); 4383 } 4384 4385 return ret_val; 4386 } 4387 4388 /** 4389 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround 4390 * @hw: pointer to the HW structure 4391 * 4392 * Work-around for 82566 Kumeran PCS lock loss: 4393 * On link status change (i.e. PCI reset, speed change) and link is up and 4394 * speed is gigabit- 4395 * 0) if workaround is optionally disabled do nothing 4396 * 1) wait 1ms for Kumeran link to come up 4397 * 2) check Kumeran Diagnostic register PCS lock loss bit 4398 * 3) if not set the link is locked (all is good), otherwise... 4399 * 4) reset the PHY 4400 * 5) repeat up to 10 times 4401 * Note: this is only called for IGP3 copper when speed is 1gb. 
4402 **/ 4403 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) 4404 { 4405 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 4406 u32 phy_ctrl; 4407 s32 ret_val; 4408 u16 i, data; 4409 bool link; 4410 4411 if (!dev_spec->kmrn_lock_loss_workaround_enabled) 4412 return 0; 4413 4414 /* Make sure link is up before proceeding. If not just return. 4415 * Attempting this while link is negotiating fouled up link 4416 * stability 4417 */ 4418 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 4419 if (!link) 4420 return 0; 4421 4422 for (i = 0; i < 10; i++) { 4423 /* read once to clear */ 4424 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); 4425 if (ret_val) 4426 return ret_val; 4427 /* and again to get new status */ 4428 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); 4429 if (ret_val) 4430 return ret_val; 4431 4432 /* check for PCS lock */ 4433 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) 4434 return 0; 4435 4436 /* Issue PHY reset */ 4437 e1000_phy_hw_reset(hw); 4438 mdelay(5); 4439 } 4440 /* Disable GigE link negotiation */ 4441 phy_ctrl = er32(PHY_CTRL); 4442 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | 4443 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 4444 ew32(PHY_CTRL, phy_ctrl); 4445 4446 /* Call gig speed drop workaround on Gig disable before accessing 4447 * any PHY registers 4448 */ 4449 e1000e_gig_downshift_workaround_ich8lan(hw); 4450 4451 /* unable to acquire PCS lock */ 4452 return -E1000_ERR_PHY; 4453 } 4454 4455 /** 4456 * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state 4457 * @hw: pointer to the HW structure 4458 * @state: boolean value used to set the current Kumeran workaround state 4459 * 4460 * If ICH8, set the current Kumeran workaround state (enabled - true 4461 * /disabled - false). 
4462 **/ 4463 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, 4464 bool state) 4465 { 4466 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 4467 4468 if (hw->mac.type != e1000_ich8lan) { 4469 e_dbg("Workaround applies to ICH8 only.\n"); 4470 return; 4471 } 4472 4473 dev_spec->kmrn_lock_loss_workaround_enabled = state; 4474 } 4475 4476 /** 4477 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 4478 * @hw: pointer to the HW structure 4479 * 4480 * Workaround for 82566 power-down on D3 entry: 4481 * 1) disable gigabit link 4482 * 2) write VR power-down enable 4483 * 3) read it back 4484 * Continue if successful, else issue LCD reset and repeat 4485 **/ 4486 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) 4487 { 4488 u32 reg; 4489 u16 data; 4490 u8 retry = 0; 4491 4492 if (hw->phy.type != e1000_phy_igp_3) 4493 return; 4494 4495 /* Try the workaround twice (if needed) */ 4496 do { 4497 /* Disable link */ 4498 reg = er32(PHY_CTRL); 4499 reg |= (E1000_PHY_CTRL_GBE_DISABLE | 4500 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 4501 ew32(PHY_CTRL, reg); 4502 4503 /* Call gig speed drop workaround on Gig disable before 4504 * accessing any PHY registers 4505 */ 4506 if (hw->mac.type == e1000_ich8lan) 4507 e1000e_gig_downshift_workaround_ich8lan(hw); 4508 4509 /* Write VR power-down enable */ 4510 e1e_rphy(hw, IGP3_VR_CTRL, &data); 4511 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 4512 e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); 4513 4514 /* Read it back and test */ 4515 e1e_rphy(hw, IGP3_VR_CTRL, &data); 4516 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 4517 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) 4518 break; 4519 4520 /* Issue PHY reset and repeat at most one more time */ 4521 reg = er32(CTRL); 4522 ew32(CTRL, reg | E1000_CTRL_PHY_RST); 4523 retry++; 4524 } while (retry); 4525 } 4526 4527 /** 4528 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working 
4529 * @hw: pointer to the HW structure 4530 * 4531 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), 4532 * LPLU, Gig disable, MDIC PHY reset): 4533 * 1) Set Kumeran Near-end loopback 4534 * 2) Clear Kumeran Near-end loopback 4535 * Should only be called for ICH8[m] devices with any 1G Phy. 4536 **/ 4537 void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) 4538 { 4539 s32 ret_val; 4540 u16 reg_data; 4541 4542 if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife)) 4543 return; 4544 4545 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 4546 ®_data); 4547 if (ret_val) 4548 return; 4549 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; 4550 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 4551 reg_data); 4552 if (ret_val) 4553 return; 4554 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; 4555 e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); 4556 } 4557 4558 /** 4559 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx 4560 * @hw: pointer to the HW structure 4561 * 4562 * During S0 to Sx transition, it is possible the link remains at gig 4563 * instead of negotiating to a lower speed. Before going to Sx, set 4564 * 'Gig Disable' to force link speed negotiation to a lower speed based on 4565 * the LPLU setting in the NVM or custom setting. For PCH and newer parts, 4566 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also 4567 * needs to be written. 4568 * Parts that support (and are linked to a partner which support) EEE in 4569 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power 4570 * than 10Mbps w/o EEE. 
4571 **/ 4572 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) 4573 { 4574 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 4575 u32 phy_ctrl; 4576 s32 ret_val; 4577 4578 phy_ctrl = er32(PHY_CTRL); 4579 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; 4580 4581 if (hw->phy.type == e1000_phy_i217) { 4582 u16 phy_reg, device_id = hw->adapter->pdev->device; 4583 4584 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || 4585 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || 4586 (device_id == E1000_DEV_ID_PCH_I218_LM3) || 4587 (device_id == E1000_DEV_ID_PCH_I218_V3)) { 4588 u32 fextnvm6 = er32(FEXTNVM6); 4589 4590 ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); 4591 } 4592 4593 ret_val = hw->phy.ops.acquire(hw); 4594 if (ret_val) 4595 goto out; 4596 4597 if (!dev_spec->eee_disable) { 4598 u16 eee_advert; 4599 4600 ret_val = 4601 e1000_read_emi_reg_locked(hw, 4602 I217_EEE_ADVERTISEMENT, 4603 &eee_advert); 4604 if (ret_val) 4605 goto release; 4606 4607 /* Disable LPLU if both link partners support 100BaseT 4608 * EEE and 100Full is advertised on both ends of the 4609 * link. 4610 */ 4611 if ((eee_advert & I82579_EEE_100_SUPPORTED) && 4612 (dev_spec->eee_lp_ability & 4613 I82579_EEE_100_SUPPORTED) && 4614 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) 4615 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | 4616 E1000_PHY_CTRL_NOND0A_LPLU); 4617 } 4618 4619 /* For i217 Intel Rapid Start Technology support, 4620 * when the system is going into Sx and no manageability engine 4621 * is present, the driver must configure proxy to reset only on 4622 * power good. LPI (Low Power Idle) state must also reset only 4623 * on power good, as well as the MTA (Multicast table array). 4624 * The SMBus release must also be disabled on LCD reset. 4625 */ 4626 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 4627 /* Enable proxy to reset only on power good. 
*/ 4628 e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); 4629 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; 4630 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); 4631 4632 /* Set bit enable LPI (EEE) to reset only on 4633 * power good. 4634 */ 4635 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); 4636 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; 4637 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); 4638 4639 /* Disable the SMB release on LCD reset. */ 4640 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4641 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; 4642 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4643 } 4644 4645 /* Enable MTA to reset for Intel Rapid Start Technology 4646 * Support 4647 */ 4648 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4649 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; 4650 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4651 4652 release: 4653 hw->phy.ops.release(hw); 4654 } 4655 out: 4656 ew32(PHY_CTRL, phy_ctrl); 4657 4658 if (hw->mac.type == e1000_ich8lan) 4659 e1000e_gig_downshift_workaround_ich8lan(hw); 4660 4661 if (hw->mac.type >= e1000_pchlan) { 4662 e1000_oem_bits_config_ich8lan(hw, false); 4663 4664 /* Reset PHY to activate OEM bits on 82577/8 */ 4665 if (hw->mac.type == e1000_pchlan) 4666 e1000e_phy_hw_reset_generic(hw); 4667 4668 ret_val = hw->phy.ops.acquire(hw); 4669 if (ret_val) 4670 return; 4671 e1000_write_smbus_addr(hw); 4672 hw->phy.ops.release(hw); 4673 } 4674 } 4675 4676 /** 4677 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 4678 * @hw: pointer to the HW structure 4679 * 4680 * During Sx to S0 transitions on non-managed devices or managed devices 4681 * on which PHY resets are not blocked, if the PHY registers cannot be 4682 * accessed properly by the s/w toggle the LANPHYPC value to power cycle 4683 * the PHY. 4684 * On i217, setup Intel Rapid Start Technology. 
4685 **/ 4686 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) 4687 { 4688 s32 ret_val; 4689 4690 if (hw->mac.type < e1000_pch2lan) 4691 return; 4692 4693 ret_val = e1000_init_phy_workarounds_pchlan(hw); 4694 if (ret_val) { 4695 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); 4696 return; 4697 } 4698 4699 /* For i217 Intel Rapid Start Technology support when the system 4700 * is transitioning from Sx and no manageability engine is present 4701 * configure SMBus to restore on reset, disable proxy, and enable 4702 * the reset on MTA (Multicast table array). 4703 */ 4704 if (hw->phy.type == e1000_phy_i217) { 4705 u16 phy_reg; 4706 4707 ret_val = hw->phy.ops.acquire(hw); 4708 if (ret_val) { 4709 e_dbg("Failed to setup iRST\n"); 4710 return; 4711 } 4712 4713 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 4714 /* Restore clear on SMB if no manageability engine 4715 * is present 4716 */ 4717 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4718 if (ret_val) 4719 goto release; 4720 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; 4721 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4722 4723 /* Disable Proxy */ 4724 e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); 4725 } 4726 /* Enable reset on MTA */ 4727 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4728 if (ret_val) 4729 goto release; 4730 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; 4731 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4732 release: 4733 if (ret_val) 4734 e_dbg("Error %d in resume workarounds\n", ret_val); 4735 hw->phy.ops.release(hw); 4736 } 4737 } 4738 4739 /** 4740 * e1000_cleanup_led_ich8lan - Restore the default LED operation 4741 * @hw: pointer to the HW structure 4742 * 4743 * Return the LED back to the default configuration. 
4744 **/ 4745 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) 4746 { 4747 if (hw->phy.type == e1000_phy_ife) 4748 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); 4749 4750 ew32(LEDCTL, hw->mac.ledctl_default); 4751 return 0; 4752 } 4753 4754 /** 4755 * e1000_led_on_ich8lan - Turn LEDs on 4756 * @hw: pointer to the HW structure 4757 * 4758 * Turn on the LEDs. 4759 **/ 4760 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) 4761 { 4762 if (hw->phy.type == e1000_phy_ife) 4763 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 4764 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); 4765 4766 ew32(LEDCTL, hw->mac.ledctl_mode2); 4767 return 0; 4768 } 4769 4770 /** 4771 * e1000_led_off_ich8lan - Turn LEDs off 4772 * @hw: pointer to the HW structure 4773 * 4774 * Turn off the LEDs. 4775 **/ 4776 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) 4777 { 4778 if (hw->phy.type == e1000_phy_ife) 4779 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 4780 (IFE_PSCL_PROBE_MODE | 4781 IFE_PSCL_PROBE_LEDS_OFF)); 4782 4783 ew32(LEDCTL, hw->mac.ledctl_mode1); 4784 return 0; 4785 } 4786 4787 /** 4788 * e1000_setup_led_pchlan - Configures SW controllable LED 4789 * @hw: pointer to the HW structure 4790 * 4791 * This prepares the SW controllable LED for use. 4792 **/ 4793 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) 4794 { 4795 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); 4796 } 4797 4798 /** 4799 * e1000_cleanup_led_pchlan - Restore the default LED operation 4800 * @hw: pointer to the HW structure 4801 * 4802 * Return the LED back to the default configuration. 4803 **/ 4804 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) 4805 { 4806 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); 4807 } 4808 4809 /** 4810 * e1000_led_on_pchlan - Turn LEDs on 4811 * @hw: pointer to the HW structure 4812 * 4813 * Turn on the LEDs. 
4814 **/ 4815 static s32 e1000_led_on_pchlan(struct e1000_hw *hw) 4816 { 4817 u16 data = (u16)hw->mac.ledctl_mode2; 4818 u32 i, led; 4819 4820 /* If no link, then turn LED on by setting the invert bit 4821 * for each LED that's mode is "link_up" in ledctl_mode2. 4822 */ 4823 if (!(er32(STATUS) & E1000_STATUS_LU)) { 4824 for (i = 0; i < 3; i++) { 4825 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; 4826 if ((led & E1000_PHY_LED0_MODE_MASK) != 4827 E1000_LEDCTL_MODE_LINK_UP) 4828 continue; 4829 if (led & E1000_PHY_LED0_IVRT) 4830 data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); 4831 else 4832 data |= (E1000_PHY_LED0_IVRT << (i * 5)); 4833 } 4834 } 4835 4836 return e1e_wphy(hw, HV_LED_CONFIG, data); 4837 } 4838 4839 /** 4840 * e1000_led_off_pchlan - Turn LEDs off 4841 * @hw: pointer to the HW structure 4842 * 4843 * Turn off the LEDs. 4844 **/ 4845 static s32 e1000_led_off_pchlan(struct e1000_hw *hw) 4846 { 4847 u16 data = (u16)hw->mac.ledctl_mode1; 4848 u32 i, led; 4849 4850 /* If no link, then turn LED off by clearing the invert bit 4851 * for each LED that's mode is "link_up" in ledctl_mode1. 4852 */ 4853 if (!(er32(STATUS) & E1000_STATUS_LU)) { 4854 for (i = 0; i < 3; i++) { 4855 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; 4856 if ((led & E1000_PHY_LED0_MODE_MASK) != 4857 E1000_LEDCTL_MODE_LINK_UP) 4858 continue; 4859 if (led & E1000_PHY_LED0_IVRT) 4860 data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); 4861 else 4862 data |= (E1000_PHY_LED0_IVRT << (i * 5)); 4863 } 4864 } 4865 4866 return e1e_wphy(hw, HV_LED_CONFIG, data); 4867 } 4868 4869 /** 4870 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset 4871 * @hw: pointer to the HW structure 4872 * 4873 * Read appropriate register for the config done bit for completion status 4874 * and configure the PHY through s/w for EEPROM-less parts. 4875 * 4876 * NOTE: some silicon which is EEPROM-less will fail trying to read the 4877 * config done bit, so only an error is logged and continues. 
If we were 4878 * to return with error, EEPROM-less silicon would not be able to be reset 4879 * or change link. 4880 **/ 4881 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) 4882 { 4883 s32 ret_val = 0; 4884 u32 bank = 0; 4885 u32 status; 4886 4887 e1000e_get_cfg_done_generic(hw); 4888 4889 /* Wait for indication from h/w that it has completed basic config */ 4890 if (hw->mac.type >= e1000_ich10lan) { 4891 e1000_lan_init_done_ich8lan(hw); 4892 } else { 4893 ret_val = e1000e_get_auto_rd_done(hw); 4894 if (ret_val) { 4895 /* When auto config read does not complete, do not 4896 * return with an error. This can happen in situations 4897 * where there is no eeprom and prevents getting link. 4898 */ 4899 e_dbg("Auto Read Done did not complete\n"); 4900 ret_val = 0; 4901 } 4902 } 4903 4904 /* Clear PHY Reset Asserted bit */ 4905 status = er32(STATUS); 4906 if (status & E1000_STATUS_PHYRA) 4907 ew32(STATUS, status & ~E1000_STATUS_PHYRA); 4908 else 4909 e_dbg("PHY Reset Asserted not set - needs delay\n"); 4910 4911 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 4912 if (hw->mac.type <= e1000_ich9lan) { 4913 if (!(er32(EECD) & E1000_EECD_PRES) && 4914 (hw->phy.type == e1000_phy_igp_3)) { 4915 e1000e_phy_init_script_igp3(hw); 4916 } 4917 } else { 4918 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { 4919 /* Maybe we should do a basic PHY config */ 4920 e_dbg("EEPROM not present\n"); 4921 ret_val = -E1000_ERR_CONFIG; 4922 } 4923 } 4924 4925 return ret_val; 4926 } 4927 4928 /** 4929 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down 4930 * @hw: pointer to the HW structure 4931 * 4932 * In the case of a PHY power down to save power, or to turn off link during a 4933 * driver unload, or wake on lan is not enabled, remove the link. 
4934 **/ 4935 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) 4936 { 4937 /* If the management interface is not enabled, then power down */ 4938 if (!(hw->mac.ops.check_mng_mode(hw) || 4939 hw->phy.ops.check_reset_block(hw))) 4940 e1000_power_down_phy_copper(hw); 4941 } 4942 4943 /** 4944 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters 4945 * @hw: pointer to the HW structure 4946 * 4947 * Clears hardware counters specific to the silicon family and calls 4948 * clear_hw_cntrs_generic to clear all general purpose counters. 4949 **/ 4950 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) 4951 { 4952 u16 phy_data; 4953 s32 ret_val; 4954 4955 e1000e_clear_hw_cntrs_base(hw); 4956 4957 er32(ALGNERRC); 4958 er32(RXERRC); 4959 er32(TNCRS); 4960 er32(CEXTERR); 4961 er32(TSCTC); 4962 er32(TSCTFC); 4963 4964 er32(MGTPRC); 4965 er32(MGTPDC); 4966 er32(MGTPTC); 4967 4968 er32(IAC); 4969 er32(ICRXOC); 4970 4971 /* Clear PHY statistics registers */ 4972 if ((hw->phy.type == e1000_phy_82578) || 4973 (hw->phy.type == e1000_phy_82579) || 4974 (hw->phy.type == e1000_phy_i217) || 4975 (hw->phy.type == e1000_phy_82577)) { 4976 ret_val = hw->phy.ops.acquire(hw); 4977 if (ret_val) 4978 return; 4979 ret_val = hw->phy.ops.set_page(hw, 4980 HV_STATS_PAGE << IGP_PAGE_SHIFT); 4981 if (ret_val) 4982 goto release; 4983 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); 4984 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); 4985 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); 4986 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); 4987 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); 4988 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); 4989 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); 4990 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); 4991 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); 4992 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); 4993 
hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); 4994 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); 4995 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); 4996 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); 4997 release: 4998 hw->phy.ops.release(hw); 4999 } 5000 } 5001 5002 static const struct e1000_mac_operations ich8_mac_ops = { 5003 /* check_mng_mode dependent on mac type */ 5004 .check_for_link = e1000_check_for_copper_link_ich8lan, 5005 /* cleanup_led dependent on mac type */ 5006 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 5007 .get_bus_info = e1000_get_bus_info_ich8lan, 5008 .set_lan_id = e1000_set_lan_id_single_port, 5009 .get_link_up_info = e1000_get_link_up_info_ich8lan, 5010 /* led_on dependent on mac type */ 5011 /* led_off dependent on mac type */ 5012 .update_mc_addr_list = e1000e_update_mc_addr_list_generic, 5013 .reset_hw = e1000_reset_hw_ich8lan, 5014 .init_hw = e1000_init_hw_ich8lan, 5015 .setup_link = e1000_setup_link_ich8lan, 5016 .setup_physical_interface = e1000_setup_copper_link_ich8lan, 5017 /* id_led_init dependent on mac type */ 5018 .config_collision_dist = e1000e_config_collision_dist_generic, 5019 .rar_set = e1000e_rar_set_generic, 5020 .rar_get_count = e1000e_rar_get_count_generic, 5021 }; 5022 5023 static const struct e1000_phy_operations ich8_phy_ops = { 5024 .acquire = e1000_acquire_swflag_ich8lan, 5025 .check_reset_block = e1000_check_reset_block_ich8lan, 5026 .commit = NULL, 5027 .get_cfg_done = e1000_get_cfg_done_ich8lan, 5028 .get_cable_length = e1000e_get_cable_length_igp_2, 5029 .read_reg = e1000e_read_phy_reg_igp, 5030 .release = e1000_release_swflag_ich8lan, 5031 .reset = e1000_phy_hw_reset_ich8lan, 5032 .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, 5033 .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, 5034 .write_reg = e1000e_write_phy_reg_igp, 5035 }; 5036 5037 static const struct e1000_nvm_operations ich8_nvm_ops = { 5038 .acquire = e1000_acquire_nvm_ich8lan, 5039 .read = 
e1000_read_nvm_ich8lan, 5040 .release = e1000_release_nvm_ich8lan, 5041 .reload = e1000e_reload_nvm_generic, 5042 .update = e1000_update_nvm_checksum_ich8lan, 5043 .valid_led_default = e1000_valid_led_default_ich8lan, 5044 .validate = e1000_validate_nvm_checksum_ich8lan, 5045 .write = e1000_write_nvm_ich8lan, 5046 }; 5047 5048 const struct e1000_info e1000_ich8_info = { 5049 .mac = e1000_ich8lan, 5050 .flags = FLAG_HAS_WOL 5051 | FLAG_IS_ICH 5052 | FLAG_HAS_CTRLEXT_ON_LOAD 5053 | FLAG_HAS_AMT 5054 | FLAG_HAS_FLASH 5055 | FLAG_APME_IN_WUC, 5056 .pba = 8, 5057 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 5058 .get_variants = e1000_get_variants_ich8lan, 5059 .mac_ops = &ich8_mac_ops, 5060 .phy_ops = &ich8_phy_ops, 5061 .nvm_ops = &ich8_nvm_ops, 5062 }; 5063 5064 const struct e1000_info e1000_ich9_info = { 5065 .mac = e1000_ich9lan, 5066 .flags = FLAG_HAS_JUMBO_FRAMES 5067 | FLAG_IS_ICH 5068 | FLAG_HAS_WOL 5069 | FLAG_HAS_CTRLEXT_ON_LOAD 5070 | FLAG_HAS_AMT 5071 | FLAG_HAS_FLASH 5072 | FLAG_APME_IN_WUC, 5073 .pba = 18, 5074 .max_hw_frame_size = DEFAULT_JUMBO, 5075 .get_variants = e1000_get_variants_ich8lan, 5076 .mac_ops = &ich8_mac_ops, 5077 .phy_ops = &ich8_phy_ops, 5078 .nvm_ops = &ich8_nvm_ops, 5079 }; 5080 5081 const struct e1000_info e1000_ich10_info = { 5082 .mac = e1000_ich10lan, 5083 .flags = FLAG_HAS_JUMBO_FRAMES 5084 | FLAG_IS_ICH 5085 | FLAG_HAS_WOL 5086 | FLAG_HAS_CTRLEXT_ON_LOAD 5087 | FLAG_HAS_AMT 5088 | FLAG_HAS_FLASH 5089 | FLAG_APME_IN_WUC, 5090 .pba = 18, 5091 .max_hw_frame_size = DEFAULT_JUMBO, 5092 .get_variants = e1000_get_variants_ich8lan, 5093 .mac_ops = &ich8_mac_ops, 5094 .phy_ops = &ich8_phy_ops, 5095 .nvm_ops = &ich8_nvm_ops, 5096 }; 5097 5098 const struct e1000_info e1000_pch_info = { 5099 .mac = e1000_pchlan, 5100 .flags = FLAG_IS_ICH 5101 | FLAG_HAS_WOL 5102 | FLAG_HAS_CTRLEXT_ON_LOAD 5103 | FLAG_HAS_AMT 5104 | FLAG_HAS_FLASH 5105 | FLAG_HAS_JUMBO_FRAMES 5106 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ 5107 | FLAG_APME_IN_WUC, 5108 
.flags2 = FLAG2_HAS_PHY_STATS, 5109 .pba = 26, 5110 .max_hw_frame_size = 4096, 5111 .get_variants = e1000_get_variants_ich8lan, 5112 .mac_ops = &ich8_mac_ops, 5113 .phy_ops = &ich8_phy_ops, 5114 .nvm_ops = &ich8_nvm_ops, 5115 }; 5116 5117 const struct e1000_info e1000_pch2_info = { 5118 .mac = e1000_pch2lan, 5119 .flags = FLAG_IS_ICH 5120 | FLAG_HAS_WOL 5121 | FLAG_HAS_HW_TIMESTAMP 5122 | FLAG_HAS_CTRLEXT_ON_LOAD 5123 | FLAG_HAS_AMT 5124 | FLAG_HAS_FLASH 5125 | FLAG_HAS_JUMBO_FRAMES 5126 | FLAG_APME_IN_WUC, 5127 .flags2 = FLAG2_HAS_PHY_STATS 5128 | FLAG2_HAS_EEE, 5129 .pba = 26, 5130 .max_hw_frame_size = 9018, 5131 .get_variants = e1000_get_variants_ich8lan, 5132 .mac_ops = &ich8_mac_ops, 5133 .phy_ops = &ich8_phy_ops, 5134 .nvm_ops = &ich8_nvm_ops, 5135 }; 5136 5137 const struct e1000_info e1000_pch_lpt_info = { 5138 .mac = e1000_pch_lpt, 5139 .flags = FLAG_IS_ICH 5140 | FLAG_HAS_WOL 5141 | FLAG_HAS_HW_TIMESTAMP 5142 | FLAG_HAS_CTRLEXT_ON_LOAD 5143 | FLAG_HAS_AMT 5144 | FLAG_HAS_FLASH 5145 | FLAG_HAS_JUMBO_FRAMES 5146 | FLAG_APME_IN_WUC, 5147 .flags2 = FLAG2_HAS_PHY_STATS 5148 | FLAG2_HAS_EEE, 5149 .pba = 26, 5150 .max_hw_frame_size = 9018, 5151 .get_variants = e1000_get_variants_ich8lan, 5152 .mac_ops = &ich8_mac_ops, 5153 .phy_ops = &ich8_phy_ops, 5154 .nvm_ops = &ich8_nvm_ops, 5155 }; 5156