/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
		(desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
}

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

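		/* FLAG_BUF marks the descriptor as having an attached
		 * buffer; FLAG_LB is additionally required for buffers
		 * larger than I40E_AQ_LARGE_BUF
		 */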
		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* In accordance with the Admin Queue design, there is no
		 * register for buffer size configuration; the size is
		 * carried in each descriptor's datalen field instead
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point and enable the queue */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point and enable the queue */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	i40e_status ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		msleep(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
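	/* combine the two OEM version words read above, high word first */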
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

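	/* request the default HMC resource profile; the status is
	 * deliberately discarded below so that a failure here does
	 * not abort AdminQ initialization
	 */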
	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans the Admin Send Queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
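	/* the firmware advances the head register as it consumes
	 * descriptors, so everything between next_to_clean and head
	 * can be reclaimed
	 */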
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

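	/* a head value at or beyond the ring size indicates a corrupted
	 * register or firmware state, so bail out rather than index
	 * past the end of the ring
	 */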
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag.\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			usleep_range(1000, 2000);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save the writeback descriptor if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = false;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
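	/* events still pending = distance from next_to_clean to
	 * next_to_use, adjusted for ring wraparound
	 */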
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

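/**
 *  i40e_resume_aq - resume AQ processing after the registers were reset
 *  @hw: pointer to the hardware structure
 *
 *  Re-initializes the ring accounting and re-programs the queue
 *  registers, e.g. after a PF reset has cleared them.
 **/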
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}