// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

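/* Wire up the send (ATQ) and receive (ARQ) register offsets and masks of a
 * control queue from a register prefix, e.g. PF_FW for the AdminQ and
 * PF_MBX for the PF-VF Mailbox queue.
 */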
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive - check if the Send Queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

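/**
 * ice_cfg_cq_regs - program a control queue ring into hardware
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring (sq or rq)
 * @num_entries: number of descriptors the ring holds
 *
 * Writes the ring's base address and length registers, then verifies the
 * configuration took effect by reading back the base address low register.
 */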
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive queue
 * (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

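/* Free everything the named ring owns: any DMA buffers still posted to the
 * ring, the command details list (sq only), and the DMA head array itself.
 * The descriptor ring is freed separately by ice_free_cq_ring().
 */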
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free any buffers currently posted to the ring */		\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
	/* free the command details list */				\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop Control Send Queue processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: true if the driver should attempt to load, false otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
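		/* Same major version: a minor-version skew of up to two
		 * releases in either direction is tolerated silently;
		 * larger skews are reported below but the driver still
		 * loads.
		 */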
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq - dump control queue descriptor and buffer contents
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

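	/* Busy-wait for the firmware to advance the head past our
	 * descriptor, polling every ICE_CTL_Q_SQ_CMD_USEC microseconds for
	 * up to sq_cmd_timeout iterations.
	 */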
	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

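/* Illustrative sketch (not part of this file): a direct command, i.e. one
 * with no indirect buffer, is typically issued by pairing the two helpers
 * above. The opcode name below is an assumption drawn from the wider ice
 * driver; any direct opcode follows the same pattern:
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * Indirect commands instead pass a buffer and its size; ice_sq_send_cmd()
 * then sets ICE_AQ_FLAG_BUF (and ICE_AQ_FLAG_LB for large buffers) and
 * copies the response back into the caller's buffer on completion.
 */
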
/**
 * ice_clean_rq_elem - clean one element from the receive queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through 'e'. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode),
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}