1 /*******************************************************************************
2 
3   Intel(R) Gigabit Ethernet Linux driver
4   Copyright(c) 2007-2014 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, see <http://www.gnu.org/licenses/>.
17 
18   The full GNU General Public License is included in this distribution in
19   the file called "COPYING".
20 
21   Contact Information:
22   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 
25 ******************************************************************************/
26 
27 /* e1000_i210
28  * e1000_i211
29  */
30 
31 #include <linux/types.h>
32 #include <linux/if_ether.h>
33 
34 #include "e1000_hw.h"
35 #include "e1000_i210.h"
36 
37 static s32 igb_update_flash_i210(struct e1000_hw *hw);
38 
/**
 *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the two-stage HW semaphore in E1000_SWSM used to arbitrate
 *  PHY/NVM access: first wait for the SW bit (SMBI) to clear, then latch
 *  the SW/FW bit (SWESMBI).
 *
 *  Returns E1000_SUCCESS on success, -E1000_ERR_NVM on timeout.
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	/* Timeout scales with NVM size; each poll iteration waits 50 us */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			/* Retry the SMBI poll once after the forced release */
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched (FW may veto the write) */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
105 
/**
 *  igb_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	/* The EEPROM lock is just the EEP bit of the SW/FW sync register */
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
119 
/**
 *  igb_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	/* Counterpart of igb_acquire_nvm_i210: drop the EEP sync bit */
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
131 
132 /**
133  *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
134  *  @hw: pointer to the HW structure
135  *  @mask: specifies which semaphore to acquire
136  *
137  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
138  *  will also specify which port we're acquiring the lock for.
139  **/
140 s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
141 {
142 	u32 swfw_sync;
143 	u32 swmask = mask;
144 	u32 fwmask = mask << 16;
145 	s32 ret_val = E1000_SUCCESS;
146 	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
147 
148 	while (i < timeout) {
149 		if (igb_get_hw_semaphore_i210(hw)) {
150 			ret_val = -E1000_ERR_SWFW_SYNC;
151 			goto out;
152 		}
153 
154 		swfw_sync = rd32(E1000_SW_FW_SYNC);
155 		if (!(swfw_sync & (fwmask | swmask)))
156 			break;
157 
158 		/* Firmware currently using resource (fwmask) */
159 		igb_put_hw_semaphore(hw);
160 		mdelay(5);
161 		i++;
162 	}
163 
164 	if (i == timeout) {
165 		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
166 		ret_val = -E1000_ERR_SWFW_SYNC;
167 		goto out;
168 	}
169 
170 	swfw_sync |= swmask;
171 	wr32(E1000_SW_FW_SYNC, swfw_sync);
172 
173 	igb_put_hw_semaphore(hw);
174 out:
175 	return ret_val;
176 }
177 
178 /**
179  *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
180  *  @hw: pointer to the HW structure
181  *  @mask: specifies which semaphore to acquire
182  *
183  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
184  *  will also specify which port we're releasing the lock for.
185  **/
186 void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
187 {
188 	u32 swfw_sync;
189 
190 	while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
191 		; /* Empty */
192 
193 	swfw_sync = rd32(E1000_SW_FW_SYNC);
194 	swfw_sync &= ~mask;
195 	wr32(E1000_SW_FW_SYNC, swfw_sync);
196 
197 	igb_put_hw_semaphore(hw);
198 }
199 
200 /**
201  *  igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
202  *  @hw: pointer to the HW structure
203  *  @offset: offset of word in the Shadow Ram to read
204  *  @words: number of words to read
205  *  @data: word read from the Shadow Ram
206  *
207  *  Reads a 16 bit word from the Shadow Ram using the EERD register.
208  *  Uses necessary synchronization semaphores.
209  **/
210 static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
211 				  u16 *data)
212 {
213 	s32 status = E1000_SUCCESS;
214 	u16 i, count;
215 
216 	/* We cannot hold synchronization semaphores for too long,
217 	 * because of forceful takeover procedure. However it is more efficient
218 	 * to read in bursts than synchronizing access for each word.
219 	 */
220 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
221 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
222 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
223 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
224 			status = igb_read_nvm_eerd(hw, offset, count,
225 						     data + i);
226 			hw->nvm.ops.release(hw);
227 		} else {
228 			status = E1000_ERR_SWFW_SYNC;
229 		}
230 
231 		if (status != E1000_SUCCESS)
232 			break;
233 	}
234 
235 	return status;
236 }
237 
238 /**
239  *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
240  *  @hw: pointer to the HW structure
241  *  @offset: offset within the Shadow Ram to be written to
242  *  @words: number of words to write
243  *  @data: 16 bit word(s) to be written to the Shadow Ram
244  *
245  *  Writes data to Shadow Ram at offset using EEWR register.
246  *
247  *  If igb_update_nvm_checksum is not called after this function , the
248  *  Shadow Ram will most likely contain an invalid checksum.
249  **/
250 static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
251 				u16 *data)
252 {
253 	struct e1000_nvm_info *nvm = &hw->nvm;
254 	u32 i, k, eewr = 0;
255 	u32 attempts = 100000;
256 	s32 ret_val = E1000_SUCCESS;
257 
258 	/* A check for invalid values:  offset too large, too many words,
259 	 * too many words for the offset, and not enough words.
260 	 */
261 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
262 	    (words == 0)) {
263 		hw_dbg("nvm parameter(s) out of bounds\n");
264 		ret_val = -E1000_ERR_NVM;
265 		goto out;
266 	}
267 
268 	for (i = 0; i < words; i++) {
269 		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
270 			(data[i] << E1000_NVM_RW_REG_DATA) |
271 			E1000_NVM_RW_REG_START;
272 
273 		wr32(E1000_SRWR, eewr);
274 
275 		for (k = 0; k < attempts; k++) {
276 			if (E1000_NVM_RW_REG_DONE &
277 			    rd32(E1000_SRWR)) {
278 				ret_val = E1000_SUCCESS;
279 				break;
280 			}
281 			udelay(5);
282 	}
283 
284 		if (ret_val != E1000_SUCCESS) {
285 			hw_dbg("Shadow RAM write EEWR timed out\n");
286 			break;
287 		}
288 	}
289 
290 out:
291 	return ret_val;
292 }
293 
294 /**
295  *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
296  *  @hw: pointer to the HW structure
297  *  @offset: offset within the Shadow RAM to be written to
298  *  @words: number of words to write
299  *  @data: 16 bit word(s) to be written to the Shadow RAM
300  *
301  *  Writes data to Shadow RAM at offset using EEWR register.
302  *
303  *  If e1000_update_nvm_checksum is not called after this function , the
304  *  data will not be committed to FLASH and also Shadow RAM will most likely
305  *  contain an invalid checksum.
306  *
307  *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
308  *  partially written.
309  **/
310 static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
311 				   u16 *data)
312 {
313 	s32 status = E1000_SUCCESS;
314 	u16 i, count;
315 
316 	/* We cannot hold synchronization semaphores for too long,
317 	 * because of forceful takeover procedure. However it is more efficient
318 	 * to write in bursts than synchronizing access for each word.
319 	 */
320 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
321 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
322 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
323 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
324 			status = igb_write_nvm_srwr(hw, offset, count,
325 						      data + i);
326 			hw->nvm.ops.release(hw);
327 		} else {
328 			status = E1000_ERR_SWFW_SYNC;
329 		}
330 
331 		if (status != E1000_SUCCESS)
332 			break;
333 	}
334 
335 	return status;
336 }
337 
338 /**
339  *  igb_read_invm_word_i210 - Reads OTP
340  *  @hw: pointer to the HW structure
341  *  @address: the word address (aka eeprom offset) to read
342  *  @data: pointer to the data read
343  *
344  *  Reads 16-bit words from the OTP. Return error when the word is not
345  *  stored in OTP.
346  **/
347 static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
348 {
349 	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
350 	u32 invm_dword;
351 	u16 i;
352 	u8 record_type, word_address;
353 
354 	for (i = 0; i < E1000_INVM_SIZE; i++) {
355 		invm_dword = rd32(E1000_INVM_DATA_REG(i));
356 		/* Get record type */
357 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
358 		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
359 			break;
360 		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
361 			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
362 		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
363 			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
364 		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
365 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
366 			if (word_address == address) {
367 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
368 				hw_dbg("Read INVM Word 0x%02x = %x",
369 					  address, *data);
370 				status = E1000_SUCCESS;
371 				break;
372 			}
373 		}
374 	}
375 	if (status != E1000_SUCCESS)
376 		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
377 	return status;
378 }
379 
380 /**
381  * igb_read_invm_i210 - Read invm wrapper function for I210/I211
382  *  @hw: pointer to the HW structure
383  *  @words: number of words to read
384  *  @data: pointer to the data read
385  *
386  *  Wrapper function to return data formerly found in the NVM.
387  **/
388 static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
389 				u16 words __always_unused, u16 *data)
390 {
391 	s32 ret_val = E1000_SUCCESS;
392 
393 	/* Only the MAC addr is required to be present in the iNVM */
394 	switch (offset) {
395 	case NVM_MAC_ADDR:
396 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
397 		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
398 						     &data[1]);
399 		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
400 						     &data[2]);
401 		if (ret_val != E1000_SUCCESS)
402 			hw_dbg("MAC Addr not found in iNVM\n");
403 		break;
404 	case NVM_INIT_CTRL_2:
405 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
406 		if (ret_val != E1000_SUCCESS) {
407 			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
408 			ret_val = E1000_SUCCESS;
409 		}
410 		break;
411 	case NVM_INIT_CTRL_4:
412 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
413 		if (ret_val != E1000_SUCCESS) {
414 			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
415 			ret_val = E1000_SUCCESS;
416 		}
417 		break;
418 	case NVM_LED_1_CFG:
419 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
420 		if (ret_val != E1000_SUCCESS) {
421 			*data = NVM_LED_1_CFG_DEFAULT_I211;
422 			ret_val = E1000_SUCCESS;
423 		}
424 		break;
425 	case NVM_LED_0_2_CFG:
426 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
427 		if (ret_val != E1000_SUCCESS) {
428 			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
429 			ret_val = E1000_SUCCESS;
430 		}
431 		break;
432 	case NVM_ID_LED_SETTINGS:
433 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
434 		if (ret_val != E1000_SUCCESS) {
435 			*data = ID_LED_RESERVED_FFFF;
436 			ret_val = E1000_SUCCESS;
437 		}
438 	case NVM_SUB_DEV_ID:
439 		*data = hw->subsystem_device_id;
440 		break;
441 	case NVM_SUB_VEN_ID:
442 		*data = hw->subsystem_vendor_id;
443 		break;
444 	case NVM_DEV_ID:
445 		*data = hw->device_id;
446 		break;
447 	case NVM_VEN_ID:
448 		*data = hw->vendor_id;
449 		break;
450 	default:
451 		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
452 		*data = NVM_RESERVED_WORD;
453 		break;
454 	}
455 	return ret_val;
456 }
457 
/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type by scanning the version/image-type
 *  records stored at the top of the iNVM, from the newest slot backwards.
 *
 *  Returns E1000_SUCCESS when a version (and image type) field is decoded,
 *  -E1000_ERR_INVM_VALUE_NOT_FOUND otherwise.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver) {
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of dwords that hold records (the tail ULT bytes excluded) */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory into a local snapshot */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number: walk record slots from the last one down,
	 * looking for the most recently programmed version field.
	 */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	if (status == E1000_SUCCESS) {
		/* Split the raw version word into major/minor components */
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type: same backwards scan over the record slots */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
551 
552 /**
553  *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
554  *  @hw: pointer to the HW structure
555  *
556  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
557  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
558  **/
559 static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
560 {
561 	s32 status = E1000_SUCCESS;
562 	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
563 
564 	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
565 
566 		/* Replace the read function with semaphore grabbing with
567 		 * the one that skips this for a while.
568 		 * We have semaphore taken already here.
569 		 */
570 		read_op_ptr = hw->nvm.ops.read;
571 		hw->nvm.ops.read = igb_read_nvm_eerd;
572 
573 		status = igb_validate_nvm_checksum(hw);
574 
575 		/* Revert original read operation. */
576 		hw->nvm.ops.read = read_op_ptr;
577 
578 		hw->nvm.ops.release(hw);
579 	} else {
580 		status = E1000_ERR_SWFW_SYNC;
581 	}
582 
583 	return status;
584 }
585 
/**
 *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 *
 *  Returns E1000_SUCCESS, a read/write error code, or -E1000_ERR_SWFW_SYNC
 *  when the NVM semaphore cannot be acquired.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum all words below the checksum word; each error path
		 * must release the semaphore taken above.
		 */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Checksum word makes the image sum to NVM_SUM (0xBABA) */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the updated shadow RAM contents to flash */
		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
643 
644 /**
645  *  igb_pool_flash_update_done_i210 - Pool FLUDONE status.
646  *  @hw: pointer to the HW structure
647  *
648  **/
649 static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
650 {
651 	s32 ret_val = -E1000_ERR_NVM;
652 	u32 i, reg;
653 
654 	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
655 		reg = rd32(E1000_EECD);
656 		if (reg & E1000_EECD_FLUDONE_I210) {
657 			ret_val = E1000_SUCCESS;
658 			break;
659 		}
660 		udelay(5);
661 	}
662 
663 	return ret_val;
664 }
665 
666 /**
667  *  igb_get_flash_presence_i210 - Check if flash device is detected.
668  *  @hw: pointer to the HW structure
669  *
670  **/
671 bool igb_get_flash_presence_i210(struct e1000_hw *hw)
672 {
673 	u32 eec = 0;
674 	bool ret_val = false;
675 
676 	eec = rd32(E1000_EECD);
677 	if (eec & E1000_EECD_FLASH_DETECTED_I210)
678 		ret_val = true;
679 
680 	return ret_val;
681 }
682 
683 /**
684  *  igb_update_flash_i210 - Commit EEPROM to the flash
685  *  @hw: pointer to the HW structure
686  *
687  **/
688 static s32 igb_update_flash_i210(struct e1000_hw *hw)
689 {
690 	s32 ret_val = E1000_SUCCESS;
691 	u32 flup;
692 
693 	ret_val = igb_pool_flash_update_done_i210(hw);
694 	if (ret_val == -E1000_ERR_NVM) {
695 		hw_dbg("Flash update time out\n");
696 		goto out;
697 	}
698 
699 	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
700 	wr32(E1000_EECD, flup);
701 
702 	ret_val = igb_pool_flash_update_done_i210(hw);
703 	if (ret_val == E1000_SUCCESS)
704 		hw_dbg("Flash update complete\n");
705 	else
706 		hw_dbg("Flash update time out\n");
707 
708 out:
709 	return ret_val;
710 }
711 
712 /**
713  *  igb_valid_led_default_i210 - Verify a valid default LED config
714  *  @hw: pointer to the HW structure
715  *  @data: pointer to the NVM (EEPROM)
716  *
717  *  Read the EEPROM for the current default LED configuration.  If the
718  *  LED configuration is not valid, set to a valid LED configuration.
719  **/
720 s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
721 {
722 	s32 ret_val;
723 
724 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
725 	if (ret_val) {
726 		hw_dbg("NVM Read Error\n");
727 		goto out;
728 	}
729 
730 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
731 		switch (hw->phy.media_type) {
732 		case e1000_media_type_internal_serdes:
733 			*data = ID_LED_DEFAULT_I210_SERDES;
734 			break;
735 		case e1000_media_type_copper:
736 		default:
737 			*data = ID_LED_DEFAULT_I210;
738 			break;
739 		}
740 	}
741 out:
742 	return ret_val;
743 }
744 
745 /**
746  *  __igb_access_xmdio_reg - Read/write XMDIO register
747  *  @hw: pointer to the HW structure
748  *  @address: XMDIO address to program
749  *  @dev_addr: device address to program
750  *  @data: pointer to value to read/write from/to the XMDIO address
751  *  @read: boolean flag to indicate read or write
752  **/
753 static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
754 				  u8 dev_addr, u16 *data, bool read)
755 {
756 	s32 ret_val = E1000_SUCCESS;
757 
758 	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
759 	if (ret_val)
760 		return ret_val;
761 
762 	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
763 	if (ret_val)
764 		return ret_val;
765 
766 	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
767 							 dev_addr);
768 	if (ret_val)
769 		return ret_val;
770 
771 	if (read)
772 		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
773 	else
774 		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
775 	if (ret_val)
776 		return ret_val;
777 
778 	/* Recalibrate the device back to 0 */
779 	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
780 	if (ret_val)
781 		return ret_val;
782 
783 	return ret_val;
784 }
785 
/**
 *  igb_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: where to store the value read from the XMDIO address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	/* Read variant of the shared MMD access helper */
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
797 
/**
 *  igb_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	/* Write variant of the shared MMD access helper */
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
809 
810 /**
811  *  igb_init_nvm_params_i210 - Init NVM func ptrs.
812  *  @hw: pointer to the HW structure
813  **/
814 s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
815 {
816 	s32 ret_val = 0;
817 	struct e1000_nvm_info *nvm = &hw->nvm;
818 
819 	nvm->ops.acquire = igb_acquire_nvm_i210;
820 	nvm->ops.release = igb_release_nvm_i210;
821 	nvm->ops.valid_led_default = igb_valid_led_default_i210;
822 
823 	/* NVM Function Pointers */
824 	if (igb_get_flash_presence_i210(hw)) {
825 		hw->nvm.type = e1000_nvm_flash_hw;
826 		nvm->ops.read    = igb_read_nvm_srrd_i210;
827 		nvm->ops.write   = igb_write_nvm_srwr_i210;
828 		nvm->ops.validate = igb_validate_nvm_checksum_i210;
829 		nvm->ops.update   = igb_update_nvm_checksum_i210;
830 	} else {
831 		hw->nvm.type = e1000_nvm_invm;
832 		nvm->ops.read     = igb_read_invm_i210;
833 		nvm->ops.write    = NULL;
834 		nvm->ops.validate = NULL;
835 		nvm->ops.update   = NULL;
836 	}
837 	return ret_val;
838 }
839