17ec59eeaSAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0
27ec59eeaSAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */
37ec59eeaSAnirudh Venkataramanan
47ec59eeaSAnirudh Venkataramanan #include "ice_common.h"
57ec59eeaSAnirudh Venkataramanan
/* Initialize the register offsets and bit masks of a control queue info
 * structure for both its send (sq/ATQ) and receive (rq/ARQ) rings, using
 * the register name prefix of the specific queue instance (e.g. PF_FW for
 * the AdminQ, PF_MBX for the Mailbox queue, PF_SB for the Sideband queue).
 */
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
277afdbc90SBruce Allan
/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * Fills in the register offsets/masks for the AdminQ (PF_FW_* registers).
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}
407ec59eeaSAnirudh Venkataramanan
/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * Fills in the register offsets/masks for the Mailbox queue (PF_MBX_*
 * registers).
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}
5375d2b253SAnirudh Venkataramanan
/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * Fills in the register offsets/masks for the Sideband queue (PF_SB_*
 * registers).
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ICE_CQ_INIT_REGS(cq, PF_SB);
}
668f5ee3c4SJacob Keller
678f5ee3c4SJacob Keller /**
687ec59eeaSAnirudh Venkataramanan * ice_check_sq_alive
69f9867df6SAnirudh Venkataramanan * @hw: pointer to the HW struct
707ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue
717ec59eeaSAnirudh Venkataramanan *
727ec59eeaSAnirudh Venkataramanan * Returns true if Queue is enabled else false.
737ec59eeaSAnirudh Venkataramanan */
ice_check_sq_alive(struct ice_hw * hw,struct ice_ctl_q_info * cq)747ec59eeaSAnirudh Venkataramanan bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
757ec59eeaSAnirudh Venkataramanan {
767ec59eeaSAnirudh Venkataramanan /* check both queue-length and queue-enable fields */
777ec59eeaSAnirudh Venkataramanan if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
787ec59eeaSAnirudh Venkataramanan return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
797ec59eeaSAnirudh Venkataramanan cq->sq.len_ena_mask)) ==
807ec59eeaSAnirudh Venkataramanan (cq->num_sq_entries | cq->sq.len_ena_mask);
817ec59eeaSAnirudh Venkataramanan
827ec59eeaSAnirudh Venkataramanan return false;
837ec59eeaSAnirudh Venkataramanan }
847ec59eeaSAnirudh Venkataramanan
857ec59eeaSAnirudh Venkataramanan /**
867ec59eeaSAnirudh Venkataramanan * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
877ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
887ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue
897ec59eeaSAnirudh Venkataramanan */
905e24d598STony Nguyen static int
ice_alloc_ctrlq_sq_ring(struct ice_hw * hw,struct ice_ctl_q_info * cq)917ec59eeaSAnirudh Venkataramanan ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
927ec59eeaSAnirudh Venkataramanan {
937ec59eeaSAnirudh Venkataramanan size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
947ec59eeaSAnirudh Venkataramanan
957ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
967ec59eeaSAnirudh Venkataramanan &cq->sq.desc_buf.pa,
977ec59eeaSAnirudh Venkataramanan GFP_KERNEL | __GFP_ZERO);
987ec59eeaSAnirudh Venkataramanan if (!cq->sq.desc_buf.va)
99d54699e2STony Nguyen return -ENOMEM;
1007ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.size = size;
1017ec59eeaSAnirudh Venkataramanan
1027ec59eeaSAnirudh Venkataramanan cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
1037ec59eeaSAnirudh Venkataramanan sizeof(struct ice_sq_cd), GFP_KERNEL);
1047ec59eeaSAnirudh Venkataramanan if (!cq->sq.cmd_buf) {
1057ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
1067ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
1077ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va = NULL;
1087ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.pa = 0;
1097ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.size = 0;
110d54699e2STony Nguyen return -ENOMEM;
1117ec59eeaSAnirudh Venkataramanan }
1127ec59eeaSAnirudh Venkataramanan
1137ec59eeaSAnirudh Venkataramanan return 0;
1147ec59eeaSAnirudh Venkataramanan }
1157ec59eeaSAnirudh Venkataramanan
1167ec59eeaSAnirudh Venkataramanan /**
1177ec59eeaSAnirudh Venkataramanan * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
1187ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
1197ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue
1207ec59eeaSAnirudh Venkataramanan */
1215e24d598STony Nguyen static int
ice_alloc_ctrlq_rq_ring(struct ice_hw * hw,struct ice_ctl_q_info * cq)1227ec59eeaSAnirudh Venkataramanan ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1237ec59eeaSAnirudh Venkataramanan {
1247ec59eeaSAnirudh Venkataramanan size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
1257ec59eeaSAnirudh Venkataramanan
1267ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
1277ec59eeaSAnirudh Venkataramanan &cq->rq.desc_buf.pa,
1287ec59eeaSAnirudh Venkataramanan GFP_KERNEL | __GFP_ZERO);
1297ec59eeaSAnirudh Venkataramanan if (!cq->rq.desc_buf.va)
130d54699e2STony Nguyen return -ENOMEM;
1317ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.size = size;
1327ec59eeaSAnirudh Venkataramanan return 0;
1337ec59eeaSAnirudh Venkataramanan }
1347ec59eeaSAnirudh Venkataramanan
1357ec59eeaSAnirudh Venkataramanan /**
1367afdbc90SBruce Allan * ice_free_cq_ring - Free control queue ring
1377ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
1387afdbc90SBruce Allan * @ring: pointer to the specific control queue ring
1397ec59eeaSAnirudh Venkataramanan *
1407afdbc90SBruce Allan * This assumes the posted buffers have already been cleaned
1417ec59eeaSAnirudh Venkataramanan * and de-allocated
1427ec59eeaSAnirudh Venkataramanan */
ice_free_cq_ring(struct ice_hw * hw,struct ice_ctl_q_ring * ring)1437afdbc90SBruce Allan static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
1447ec59eeaSAnirudh Venkataramanan {
1457afdbc90SBruce Allan dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
1467afdbc90SBruce Allan ring->desc_buf.va, ring->desc_buf.pa);
1477afdbc90SBruce Allan ring->desc_buf.va = NULL;
1487afdbc90SBruce Allan ring->desc_buf.pa = 0;
1497afdbc90SBruce Allan ring->desc_buf.size = 0;
1507ec59eeaSAnirudh Venkataramanan }
1517ec59eeaSAnirudh Venkataramanan
/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Allocates one DMA buffer per receive descriptor and pre-posts each one by
 * writing its address and size into the corresponding descriptor. On any
 * allocation failure, all buffers allocated so far are unwound and -ENOMEM
 * is returned.
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return -ENOMEM;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		/* mark the descriptor as having an attached buffer; large
		 * buffers additionally need the "large buffer" flag set
		 */
		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	/* clear both aliases of the buffer info array after freeing it */
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return -ENOMEM;
}
2237ec59eeaSAnirudh Venkataramanan
/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Allocates one DMA buffer per send descriptor. Unlike the receive queue,
 * the descriptors themselves are not pre-configured here; buffers are only
 * attached when a command with indirect data is actually sent. On any
 * allocation failure the buffers allocated so far are unwound and -ENOMEM
 * is returned.
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return -ENOMEM;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	/* clear both aliases of the buffer info array after freeing it */
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return -ENOMEM;
}
2717ec59eeaSAnirudh Venkataramanan
/**
 * ice_cfg_cq_regs - configure control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clears head and tail, programs the ring length (with the enable bit set)
 * and the descriptor base address, then reads one register back to verify
 * the configuration was applied. Returns 0 on success or -EIO if the
 * read-back does not match.
 */
static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return -EIO;

	return 0;
}
2907ec59eeaSAnirudh Venkataramanan
/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
3027ec59eeaSAnirudh Venkataramanan
3037ec59eeaSAnirudh Venkataramanan /**
3047ec59eeaSAnirudh Venkataramanan * ice_cfg_rq_regs - configure Control ARQ register
3057ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
3067ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue
3077ec59eeaSAnirudh Venkataramanan *
308f9867df6SAnirudh Venkataramanan * Configure base address and length registers for the receive (event queue)
3097ec59eeaSAnirudh Venkataramanan */
ice_cfg_rq_regs(struct ice_hw * hw,struct ice_ctl_q_info * cq)3105518ac2aSTony Nguyen static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
3117ec59eeaSAnirudh Venkataramanan {
3125e24d598STony Nguyen int status;
3137ec59eeaSAnirudh Venkataramanan
3147afdbc90SBruce Allan status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
3157afdbc90SBruce Allan if (status)
3167afdbc90SBruce Allan return status;
3177ec59eeaSAnirudh Venkataramanan
3187ec59eeaSAnirudh Venkataramanan /* Update tail in the HW to post pre-allocated buffers */
3197ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
3207ec59eeaSAnirudh Venkataramanan
3217ec59eeaSAnirudh Venkataramanan return 0;
3227ec59eeaSAnirudh Venkataramanan }
3237ec59eeaSAnirudh Venkataramanan
/* Free all DMA buffers posted to a control queue ring ("sq" or "rq"),
 * the command details array, and the buffer info (DMA head) array.
 * Safe to invoke when the buffer info array was never allocated.
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;\
				(qi)->ring.r.ring##_bi[i].pa = 0;\
				(qi)->ring.r.ring##_bi[i].size = 0;\
			}						\
	}								\
	/* free the buffer info list */					\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);		\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)
34668d27078SSurabhi Boob
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! a non-zero count marks the queue as initialized */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind buffer and ring allocations on any failure past them */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}
4067ec59eeaSAnirudh Venkataramanan
/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! a non-zero count marks the queue as initialized */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind buffer and ring allocations on any failure past them */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}
4667ec59eeaSAnirudh Venkataramanan
/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 *
 * Disables the queue in hardware by zeroing its registers, then frees the
 * posted buffers and the descriptor ring. Returns -EBUSY if the queue was
 * not initialized. Takes and releases the sq_lock internally.
 */
static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		/* queue was never initialized (or already shut down) */
		ret_code = -EBUSY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}
5027ec59eeaSAnirudh Venkataramanan
5037ec59eeaSAnirudh Venkataramanan /**
5047ec59eeaSAnirudh Venkataramanan * ice_aq_ver_check - Check the reported AQ API version.
5057ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
506396fbf9cSJacob Keller *
5077ec59eeaSAnirudh Venkataramanan * Checks if the driver should load on a given AQ API version.
5087ec59eeaSAnirudh Venkataramanan *
5097ec59eeaSAnirudh Venkataramanan * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
5107ec59eeaSAnirudh Venkataramanan */
ice_aq_ver_check(struct ice_hw * hw)5117ec59eeaSAnirudh Venkataramanan static bool ice_aq_ver_check(struct ice_hw *hw)
512396fbf9cSJacob Keller {
5137ec59eeaSAnirudh Venkataramanan if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
514396fbf9cSJacob Keller /* Major API version is newer than expected, don't load */
515396fbf9cSJacob Keller dev_warn(ice_hw_to_dev(hw),
516396fbf9cSJacob Keller "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
517396fbf9cSJacob Keller return false;
5187ec59eeaSAnirudh Venkataramanan } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
519396fbf9cSJacob Keller if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
520396fbf9cSJacob Keller dev_info(ice_hw_to_dev(hw),
521396fbf9cSJacob Keller "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
522396fbf9cSJacob Keller else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
523396fbf9cSJacob Keller dev_info(ice_hw_to_dev(hw),
524396fbf9cSJacob Keller "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
525396fbf9cSJacob Keller } else {
526396fbf9cSJacob Keller /* Major API version is older than expected, log a warning */
527396fbf9cSJacob Keller dev_info(ice_hw_to_dev(hw),
528396fbf9cSJacob Keller "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
529396fbf9cSJacob Keller }
530396fbf9cSJacob Keller return true;
5317ec59eeaSAnirudh Venkataramanan }
5327ec59eeaSAnirudh Venkataramanan
5337ec59eeaSAnirudh Venkataramanan /**
5347ec59eeaSAnirudh Venkataramanan * ice_shutdown_rq - shutdown Control ARQ
5357ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
5367ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue
5377ec59eeaSAnirudh Venkataramanan *
5387ec59eeaSAnirudh Venkataramanan * The main shutdown routine for the Control Receive Queue
5397ec59eeaSAnirudh Venkataramanan */
ice_shutdown_rq(struct ice_hw * hw,struct ice_ctl_q_info * cq)5407ec59eeaSAnirudh Venkataramanan static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
5415518ac2aSTony Nguyen {
5427ec59eeaSAnirudh Venkataramanan int ret_code = 0;
5435e24d598STony Nguyen
5447ec59eeaSAnirudh Venkataramanan mutex_lock(&cq->rq_lock);
5457ec59eeaSAnirudh Venkataramanan
5467ec59eeaSAnirudh Venkataramanan if (!cq->rq.count) {
5477ec59eeaSAnirudh Venkataramanan ret_code = -EBUSY;
548d54699e2STony Nguyen goto shutdown_rq_out;
5497ec59eeaSAnirudh Venkataramanan }
5507ec59eeaSAnirudh Venkataramanan
5517ec59eeaSAnirudh Venkataramanan /* Stop Control Queue processing */
5527ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.head, 0);
5537ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.tail, 0);
5547ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.len, 0);
5557ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.bal, 0);
5567ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.bah, 0);
5577ec59eeaSAnirudh Venkataramanan
5587ec59eeaSAnirudh Venkataramanan /* set rq.count to 0 to indicate uninitialized queue */
5597ec59eeaSAnirudh Venkataramanan cq->rq.count = 0;
5607ec59eeaSAnirudh Venkataramanan
5617ec59eeaSAnirudh Venkataramanan /* free ring buffers and the ring itself */
5627ec59eeaSAnirudh Venkataramanan ICE_FREE_CQ_BUFS(hw, cq, rq);
5637afdbc90SBruce Allan ice_free_cq_ring(hw, &cq->rq);
5647afdbc90SBruce Allan
5657ec59eeaSAnirudh Venkataramanan shutdown_rq_out:
5667ec59eeaSAnirudh Venkataramanan mutex_unlock(&cq->rq_lock);
5677ec59eeaSAnirudh Venkataramanan return ret_code;
5687ec59eeaSAnirudh Venkataramanan }
5697ec59eeaSAnirudh Venkataramanan
5707ec59eeaSAnirudh Venkataramanan /**
5717ec59eeaSAnirudh Venkataramanan * ice_init_check_adminq - Check version for Admin Queue to know if its alive
5727ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
5737ec59eeaSAnirudh Venkataramanan */
ice_init_check_adminq(struct ice_hw * hw)5747ec59eeaSAnirudh Venkataramanan static int ice_init_check_adminq(struct ice_hw *hw)
5755e24d598STony Nguyen {
5767ec59eeaSAnirudh Venkataramanan struct ice_ctl_q_info *cq = &hw->adminq;
5777ec59eeaSAnirudh Venkataramanan int status;
5785e24d598STony Nguyen
5797ec59eeaSAnirudh Venkataramanan status = ice_aq_get_fw_ver(hw, NULL);
5807ec59eeaSAnirudh Venkataramanan if (status)
5817ec59eeaSAnirudh Venkataramanan goto init_ctrlq_free_rq;
5827ec59eeaSAnirudh Venkataramanan
5837ec59eeaSAnirudh Venkataramanan if (!ice_aq_ver_check(hw)) {
584396fbf9cSJacob Keller status = -EIO;
585d54699e2STony Nguyen goto init_ctrlq_free_rq;
5867ec59eeaSAnirudh Venkataramanan }
5877ec59eeaSAnirudh Venkataramanan
5887ec59eeaSAnirudh Venkataramanan return 0;
5897ec59eeaSAnirudh Venkataramanan
5907ec59eeaSAnirudh Venkataramanan init_ctrlq_free_rq:
5917ec59eeaSAnirudh Venkataramanan ice_shutdown_rq(hw, cq);
5927ec59eeaSAnirudh Venkataramanan ice_shutdown_sq(hw, cq);
5937ec59eeaSAnirudh Venkataramanan return status;
5947ec59eeaSAnirudh Venkataramanan }
5957ec59eeaSAnirudh Venkataramanan
5967ec59eeaSAnirudh Venkataramanan /**
5977ec59eeaSAnirudh Venkataramanan * ice_init_ctrlq - main initialization routine for any control Queue
5987ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
5997ec59eeaSAnirudh Venkataramanan * @q_type: specific Control queue type
6007ec59eeaSAnirudh Venkataramanan *
6017ec59eeaSAnirudh Venkataramanan * Prior to calling this function, the driver *MUST* set the following fields
6025c91ecfdSJacob Keller * in the cq->structure:
6037ec59eeaSAnirudh Venkataramanan * - cq->num_sq_entries
6047ec59eeaSAnirudh Venkataramanan * - cq->num_rq_entries
6057ec59eeaSAnirudh Venkataramanan * - cq->rq_buf_size
6067ec59eeaSAnirudh Venkataramanan * - cq->sq_buf_size
6077ec59eeaSAnirudh Venkataramanan *
6085c91ecfdSJacob Keller * NOTE: this function does not initialize the controlq locks
6095c91ecfdSJacob Keller */
ice_init_ctrlq(struct ice_hw * hw,enum ice_ctl_q q_type)6107ec59eeaSAnirudh Venkataramanan static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
6115e24d598STony Nguyen {
6127ec59eeaSAnirudh Venkataramanan struct ice_ctl_q_info *cq;
6137ec59eeaSAnirudh Venkataramanan int ret_code;
6145e24d598STony Nguyen
6157ec59eeaSAnirudh Venkataramanan switch (q_type) {
6167ec59eeaSAnirudh Venkataramanan case ICE_CTL_Q_ADMIN:
6177ec59eeaSAnirudh Venkataramanan ice_adminq_init_regs(hw);
6187ec59eeaSAnirudh Venkataramanan cq = &hw->adminq;
6197ec59eeaSAnirudh Venkataramanan break;
6207ec59eeaSAnirudh Venkataramanan case ICE_CTL_Q_SB:
6218f5ee3c4SJacob Keller ice_sb_init_regs(hw);
6228f5ee3c4SJacob Keller cq = &hw->sbq;
6238f5ee3c4SJacob Keller break;
6248f5ee3c4SJacob Keller case ICE_CTL_Q_MAILBOX:
62575d2b253SAnirudh Venkataramanan ice_mailbox_init_regs(hw);
62675d2b253SAnirudh Venkataramanan cq = &hw->mailboxq;
62775d2b253SAnirudh Venkataramanan break;
62875d2b253SAnirudh Venkataramanan default:
6297ec59eeaSAnirudh Venkataramanan return -EINVAL;
630d54699e2STony Nguyen }
6317ec59eeaSAnirudh Venkataramanan cq->qtype = q_type;
6327ec59eeaSAnirudh Venkataramanan
6337ec59eeaSAnirudh Venkataramanan /* verify input for valid configuration */
6347ec59eeaSAnirudh Venkataramanan if (!cq->num_rq_entries || !cq->num_sq_entries ||
6357ec59eeaSAnirudh Venkataramanan !cq->rq_buf_size || !cq->sq_buf_size) {
6367ec59eeaSAnirudh Venkataramanan return -EIO;
637d54699e2STony Nguyen }
6387ec59eeaSAnirudh Venkataramanan
6397ec59eeaSAnirudh Venkataramanan /* allocate the ATQ */
6407ec59eeaSAnirudh Venkataramanan ret_code = ice_init_sq(hw, cq);
6417ec59eeaSAnirudh Venkataramanan if (ret_code)
6427ec59eeaSAnirudh Venkataramanan return ret_code;
6435c91ecfdSJacob Keller
6447ec59eeaSAnirudh Venkataramanan /* allocate the ARQ */
6457ec59eeaSAnirudh Venkataramanan ret_code = ice_init_rq(hw, cq);
6467ec59eeaSAnirudh Venkataramanan if (ret_code)
6477ec59eeaSAnirudh Venkataramanan goto init_ctrlq_free_sq;
6487ec59eeaSAnirudh Venkataramanan
6497ec59eeaSAnirudh Venkataramanan /* success! */
6507ec59eeaSAnirudh Venkataramanan return 0;
6517ec59eeaSAnirudh Venkataramanan
6527ec59eeaSAnirudh Venkataramanan init_ctrlq_free_sq:
6537ec59eeaSAnirudh Venkataramanan ice_shutdown_sq(hw, cq);
6547ec59eeaSAnirudh Venkataramanan return ret_code;
6557ec59eeaSAnirudh Venkataramanan }
6567ec59eeaSAnirudh Venkataramanan
6577ec59eeaSAnirudh Venkataramanan /**
6587ec59eeaSAnirudh Venkataramanan * ice_is_sbq_supported - is the sideband queue supported
6598f5ee3c4SJacob Keller * @hw: pointer to the hardware structure
6608f5ee3c4SJacob Keller *
6618f5ee3c4SJacob Keller * Returns true if the sideband control queue interface is
6628f5ee3c4SJacob Keller * supported for the device, false otherwise
6638f5ee3c4SJacob Keller */
ice_is_sbq_supported(struct ice_hw * hw)6648f5ee3c4SJacob Keller bool ice_is_sbq_supported(struct ice_hw *hw)
6658f5ee3c4SJacob Keller {
6668f5ee3c4SJacob Keller /* The device sideband queue is only supported on devices with the
6678f5ee3c4SJacob Keller * generic MAC type.
6688f5ee3c4SJacob Keller */
6698f5ee3c4SJacob Keller return hw->mac_type == ICE_MAC_GENERIC;
6708f5ee3c4SJacob Keller }
6718f5ee3c4SJacob Keller
6728f5ee3c4SJacob Keller /**
6738f5ee3c4SJacob Keller * ice_get_sbq - returns the right control queue to use for sideband
6748f5ee3c4SJacob Keller * @hw: pointer to the hardware structure
6758f5ee3c4SJacob Keller */
ice_get_sbq(struct ice_hw * hw)6768f5ee3c4SJacob Keller struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
6778f5ee3c4SJacob Keller {
6788f5ee3c4SJacob Keller if (ice_is_sbq_supported(hw))
6798f5ee3c4SJacob Keller return &hw->sbq;
6808f5ee3c4SJacob Keller return &hw->adminq;
6818f5ee3c4SJacob Keller }
6828f5ee3c4SJacob Keller
/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Shuts down both the send and receive rings of the selected control queue.
 * For the admin queue, firmware is additionally notified of the shutdown
 * (via ice_aq_q_shutdown) when the send queue is still responsive.
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		/* tell FW we are going down while the AQ can still carry
		 * the message
		 */
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		/* unknown queue type: nothing to shut down */
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}
713b29bc220SPreethi Banala
/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PHY Sideband (only present on supporting devices) */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
7327ec59eeaSAnirudh Venkataramanan
7337ec59eeaSAnirudh Venkataramanan /**
7347ec59eeaSAnirudh Venkataramanan * ice_init_all_ctrlq - main initialization routine for all control queues
735b5c7f857SEvan Swanson * @hw: pointer to the hardware structure
736b5c7f857SEvan Swanson *
737b5c7f857SEvan Swanson * Prior to calling this function, the driver MUST* set the following fields
738b5c7f857SEvan Swanson * in the cq->structure for all control queues:
739b5c7f857SEvan Swanson * - cq->num_sq_entries
740b5c7f857SEvan Swanson * - cq->num_rq_entries
741b5c7f857SEvan Swanson * - cq->rq_buf_size
742b5c7f857SEvan Swanson * - cq->sq_buf_size
743b5c7f857SEvan Swanson *
744b5c7f857SEvan Swanson * NOTE: this function does not initialize the controlq locks.
745b5c7f857SEvan Swanson */
ice_init_all_ctrlq(struct ice_hw * hw)746b5c7f857SEvan Swanson int ice_init_all_ctrlq(struct ice_hw *hw)
7475e24d598STony Nguyen {
748b5c7f857SEvan Swanson u32 retry = 0;
749b5c7f857SEvan Swanson int status;
7505518ac2aSTony Nguyen
751b5c7f857SEvan Swanson /* Init FW admin queue */
752b5c7f857SEvan Swanson do {
753b5c7f857SEvan Swanson status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
754b5c7f857SEvan Swanson if (status)
755b5c7f857SEvan Swanson return status;
756b5c7f857SEvan Swanson
757b5c7f857SEvan Swanson status = ice_init_check_adminq(hw);
758b5c7f857SEvan Swanson if (status != -EIO)
759d54699e2STony Nguyen break;
760b5c7f857SEvan Swanson
761b5c7f857SEvan Swanson ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
7629228d8b2SJacob Keller ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
763b5c7f857SEvan Swanson msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
764b5c7f857SEvan Swanson } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
765b5c7f857SEvan Swanson
766b5c7f857SEvan Swanson if (status)
767b5c7f857SEvan Swanson return status;
768b5c7f857SEvan Swanson /* sideband control queue (SBQ) interface is not supported on some
7698f5ee3c4SJacob Keller * devices. Initialize if supported, else fallback to the admin queue
7708f5ee3c4SJacob Keller * interface
7718f5ee3c4SJacob Keller */
7728f5ee3c4SJacob Keller if (ice_is_sbq_supported(hw)) {
7738f5ee3c4SJacob Keller status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
7748f5ee3c4SJacob Keller if (status)
7758f5ee3c4SJacob Keller return status;
7768f5ee3c4SJacob Keller }
7778f5ee3c4SJacob Keller /* Init Mailbox queue */
778b5c7f857SEvan Swanson return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
779b5c7f857SEvan Swanson }
780b5c7f857SEvan Swanson
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 * Must be called exactly once per queue before the queue is first used;
 * pair with ice_destroy_ctrlq_locks on teardown.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}
792b5c7f857SEvan Swanson
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	/* the sideband queue only exists on supporting devices */
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
818b5c7f857SEvan Swanson
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 * The queue must already be shut down; no one may hold either lock.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}
8305c91ecfdSJacob Keller
/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	/* the sideband queue lock only exists on supporting devices */
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
8505c91ecfdSJacob Keller
8515c91ecfdSJacob Keller /**
8525c91ecfdSJacob Keller * ice_clean_sq - cleans Admin send queue (ATQ)
8537ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure
8547ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue
8557ec59eeaSAnirudh Venkataramanan *
8567ec59eeaSAnirudh Venkataramanan * returns the number of free desc
8577ec59eeaSAnirudh Venkataramanan */
ice_clean_sq(struct ice_hw * hw,struct ice_ctl_q_info * cq)8587ec59eeaSAnirudh Venkataramanan static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
8597ec59eeaSAnirudh Venkataramanan {
8607ec59eeaSAnirudh Venkataramanan struct ice_ctl_q_ring *sq = &cq->sq;
8617ec59eeaSAnirudh Venkataramanan u16 ntc = sq->next_to_clean;
8627ec59eeaSAnirudh Venkataramanan struct ice_sq_cd *details;
8637ec59eeaSAnirudh Venkataramanan struct ice_aq_desc *desc;
8647ec59eeaSAnirudh Venkataramanan
8657ec59eeaSAnirudh Venkataramanan desc = ICE_CTL_Q_DESC(*sq, ntc);
8667ec59eeaSAnirudh Venkataramanan details = ICE_CTL_Q_DETAILS(*sq, ntc);
8677ec59eeaSAnirudh Venkataramanan
8687ec59eeaSAnirudh Venkataramanan while (rd32(hw, cq->sq.head) != ntc) {
8697ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
8709228d8b2SJacob Keller memset(desc, 0, sizeof(*desc));
8717ec59eeaSAnirudh Venkataramanan memset(details, 0, sizeof(*details));
8727ec59eeaSAnirudh Venkataramanan ntc++;
8737ec59eeaSAnirudh Venkataramanan if (ntc == sq->count)
8747ec59eeaSAnirudh Venkataramanan ntc = 0;
8757ec59eeaSAnirudh Venkataramanan desc = ICE_CTL_Q_DESC(*sq, ntc);
8767ec59eeaSAnirudh Venkataramanan details = ICE_CTL_Q_DETAILS(*sq, ntc);
8777ec59eeaSAnirudh Venkataramanan }
8787ec59eeaSAnirudh Venkataramanan
8797ec59eeaSAnirudh Venkataramanan sq->next_to_clean = ntc;
8807ec59eeaSAnirudh Venkataramanan
8817ec59eeaSAnirudh Venkataramanan return ICE_CTL_Q_DESC_UNUSED(sq);
8827ec59eeaSAnirudh Venkataramanan }
8837ec59eeaSAnirudh Venkataramanan
8847ec59eeaSAnirudh Venkataramanan /**
8857ec59eeaSAnirudh Venkataramanan * ice_debug_cq
886faa01721SJacob Keller * @hw: pointer to the hardware structure
887faa01721SJacob Keller * @desc: pointer to control queue descriptor
888faa01721SJacob Keller * @buf: pointer to command buffer
889faa01721SJacob Keller * @buf_len: max length of buf
890faa01721SJacob Keller *
891faa01721SJacob Keller * Dumps debug log about control command with descriptor contents.
892faa01721SJacob Keller */
ice_debug_cq(struct ice_hw * hw,void * desc,void * buf,u16 buf_len)893faa01721SJacob Keller static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
894faa01721SJacob Keller {
895faa01721SJacob Keller struct ice_aq_desc *cq_desc = desc;
8967a63dae0SBruce Allan u16 len;
897faa01721SJacob Keller
898faa01721SJacob Keller if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
899faa01721SJacob Keller !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
900faa01721SJacob Keller return;
901faa01721SJacob Keller
902faa01721SJacob Keller if (!desc)
903faa01721SJacob Keller return;
904faa01721SJacob Keller
905faa01721SJacob Keller len = le16_to_cpu(cq_desc->datalen);
906faa01721SJacob Keller
907faa01721SJacob Keller ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
9089228d8b2SJacob Keller le16_to_cpu(cq_desc->opcode),
909faa01721SJacob Keller le16_to_cpu(cq_desc->flags),
910faa01721SJacob Keller le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
911faa01721SJacob Keller ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
912faa01721SJacob Keller le32_to_cpu(cq_desc->cookie_high),
913faa01721SJacob Keller le32_to_cpu(cq_desc->cookie_low));
914faa01721SJacob Keller ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
915faa01721SJacob Keller le32_to_cpu(cq_desc->params.generic.param0),
916faa01721SJacob Keller le32_to_cpu(cq_desc->params.generic.param1));
917faa01721SJacob Keller ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
918faa01721SJacob Keller le32_to_cpu(cq_desc->params.generic.addr_high),
919faa01721SJacob Keller le32_to_cpu(cq_desc->params.generic.addr_low));
920faa01721SJacob Keller if (buf && cq_desc->datalen != 0) {
921faa01721SJacob Keller ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
922faa01721SJacob Keller if (buf_len < len)
923faa01721SJacob Keller len = buf_len;
924faa01721SJacob Keller
925faa01721SJacob Keller ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
9267a63dae0SBruce Allan }
927faa01721SJacob Keller }
928faa01721SJacob Keller
929faa01721SJacob Keller /**
930faa01721SJacob Keller * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
9317ec59eeaSAnirudh Venkataramanan * @hw: pointer to the HW struct
932f9867df6SAnirudh Venkataramanan * @cq: pointer to the specific Control queue
9337ec59eeaSAnirudh Venkataramanan *
9347ec59eeaSAnirudh Venkataramanan * Returns true if the firmware has processed all descriptors on the
9357ec59eeaSAnirudh Venkataramanan * admin send queue. Returns false if there are still requests pending.
9367ec59eeaSAnirudh Venkataramanan */
ice_sq_done(struct ice_hw * hw,struct ice_ctl_q_info * cq)9377ec59eeaSAnirudh Venkataramanan static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
9387ec59eeaSAnirudh Venkataramanan {
9397ec59eeaSAnirudh Venkataramanan /* AQ designers suggest use of head for better
9407ec59eeaSAnirudh Venkataramanan * timing reliability than DD bit
9417ec59eeaSAnirudh Venkataramanan */
9427ec59eeaSAnirudh Venkataramanan return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
9437ec59eeaSAnirudh Venkataramanan }
9447ec59eeaSAnirudh Venkataramanan
9457ec59eeaSAnirudh Venkataramanan /**
9467ec59eeaSAnirudh Venkataramanan * ice_sq_send_cmd - send command to Control Queue (ATQ)
9477ec59eeaSAnirudh Venkataramanan * @hw: pointer to the HW struct
948f9867df6SAnirudh Venkataramanan * @cq: pointer to the specific Control queue
9497ec59eeaSAnirudh Venkataramanan * @desc: prefilled descriptor describing the command
9503056df93SChinh T Cao * @buf: buffer to use for indirect commands (or NULL for direct commands)
9517ec59eeaSAnirudh Venkataramanan * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
9527ec59eeaSAnirudh Venkataramanan * @cd: pointer to command details structure
9537ec59eeaSAnirudh Venkataramanan *
9547ec59eeaSAnirudh Venkataramanan * This is the main send command routine for the ATQ. It runs the queue,
955df17b7e0SAnirudh Venkataramanan * cleans the queue, etc.
9567ec59eeaSAnirudh Venkataramanan */
9577ec59eeaSAnirudh Venkataramanan int
ice_sq_send_cmd(struct ice_hw * hw,struct ice_ctl_q_info * cq,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd)9585e24d598STony Nguyen ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
9597ec59eeaSAnirudh Venkataramanan struct ice_aq_desc *desc, void *buf, u16 buf_size,
9607ec59eeaSAnirudh Venkataramanan struct ice_sq_cd *cd)
9617ec59eeaSAnirudh Venkataramanan {
9627ec59eeaSAnirudh Venkataramanan struct ice_dma_mem *dma_buf = NULL;
9637ec59eeaSAnirudh Venkataramanan struct ice_aq_desc *desc_on_ring;
9647ec59eeaSAnirudh Venkataramanan bool cmd_completed = false;
9657ec59eeaSAnirudh Venkataramanan struct ice_sq_cd *details;
9667ec59eeaSAnirudh Venkataramanan unsigned long timeout;
967*f86d6f9cSMichal Schmidt int status = 0;
9685518ac2aSTony Nguyen u16 retval = 0;
9697ec59eeaSAnirudh Venkataramanan u32 val = 0;
9707ec59eeaSAnirudh Venkataramanan
9717ec59eeaSAnirudh Venkataramanan /* if reset is in progress return a soft error */
972fd2a9817SAnirudh Venkataramanan if (hw->reset_ongoing)
973fd2a9817SAnirudh Venkataramanan return -EBUSY;
974d54699e2STony Nguyen mutex_lock(&cq->sq_lock);
9757ec59eeaSAnirudh Venkataramanan
9767ec59eeaSAnirudh Venkataramanan cq->sq_last_status = ICE_AQ_RC_OK;
9777ec59eeaSAnirudh Venkataramanan
9787ec59eeaSAnirudh Venkataramanan if (!cq->sq.count) {
9797ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
9809228d8b2SJacob Keller status = -EIO;
981d54699e2STony Nguyen goto sq_send_command_error;
9827ec59eeaSAnirudh Venkataramanan }
9837ec59eeaSAnirudh Venkataramanan
9847ec59eeaSAnirudh Venkataramanan if ((buf && !buf_size) || (!buf && buf_size)) {
9857ec59eeaSAnirudh Venkataramanan status = -EINVAL;
986d54699e2STony Nguyen goto sq_send_command_error;
9877ec59eeaSAnirudh Venkataramanan }
9887ec59eeaSAnirudh Venkataramanan
9897ec59eeaSAnirudh Venkataramanan if (buf) {
9907ec59eeaSAnirudh Venkataramanan if (buf_size > cq->sq_buf_size) {
9917ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
9929228d8b2SJacob Keller buf_size);
9937ec59eeaSAnirudh Venkataramanan status = -EINVAL;
994d54699e2STony Nguyen goto sq_send_command_error;
9957ec59eeaSAnirudh Venkataramanan }
9967ec59eeaSAnirudh Venkataramanan
9977ec59eeaSAnirudh Venkataramanan desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
9987ec59eeaSAnirudh Venkataramanan if (buf_size > ICE_AQ_LG_BUF)
9997ec59eeaSAnirudh Venkataramanan desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
10007ec59eeaSAnirudh Venkataramanan }
10017ec59eeaSAnirudh Venkataramanan
10027ec59eeaSAnirudh Venkataramanan val = rd32(hw, cq->sq.head);
10037ec59eeaSAnirudh Venkataramanan if (val >= cq->num_sq_entries) {
10047ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
10059228d8b2SJacob Keller val);
10067ec59eeaSAnirudh Venkataramanan status = -EIO;
1007d54699e2STony Nguyen goto sq_send_command_error;
10087ec59eeaSAnirudh Venkataramanan }
10097ec59eeaSAnirudh Venkataramanan
10107ec59eeaSAnirudh Venkataramanan details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
10117ec59eeaSAnirudh Venkataramanan if (cd)
10127ec59eeaSAnirudh Venkataramanan *details = *cd;
1013daca32a2SBruce Allan else
10147ec59eeaSAnirudh Venkataramanan memset(details, 0, sizeof(*details));
10157ec59eeaSAnirudh Venkataramanan
10167ec59eeaSAnirudh Venkataramanan /* Call clean and check queue available function to reclaim the
10177ec59eeaSAnirudh Venkataramanan * descriptors that were processed by FW/MBX; the function returns the
10187ec59eeaSAnirudh Venkataramanan * number of desc available. The clean function called here could be
10197ec59eeaSAnirudh Venkataramanan * called in a separate thread in case of asynchronous completions.
10207ec59eeaSAnirudh Venkataramanan */
10217ec59eeaSAnirudh Venkataramanan if (ice_clean_sq(hw, cq) == 0) {
10227ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
10239228d8b2SJacob Keller status = -ENOSPC;
1024d54699e2STony Nguyen goto sq_send_command_error;
10257ec59eeaSAnirudh Venkataramanan }
10267ec59eeaSAnirudh Venkataramanan
10277ec59eeaSAnirudh Venkataramanan /* initialize the temp desc pointer with the right desc */
10287ec59eeaSAnirudh Venkataramanan desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
10297ec59eeaSAnirudh Venkataramanan
10307ec59eeaSAnirudh Venkataramanan /* if the desc is available copy the temp desc to the right place */
10317ec59eeaSAnirudh Venkataramanan memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));
10327ec59eeaSAnirudh Venkataramanan
10337ec59eeaSAnirudh Venkataramanan /* if buf is not NULL assume indirect command */
10347ec59eeaSAnirudh Venkataramanan if (buf) {
10357ec59eeaSAnirudh Venkataramanan dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
10367ec59eeaSAnirudh Venkataramanan /* copy the user buf into the respective DMA buf */
10377ec59eeaSAnirudh Venkataramanan memcpy(dma_buf->va, buf, buf_size);
10387ec59eeaSAnirudh Venkataramanan desc_on_ring->datalen = cpu_to_le16(buf_size);
10397ec59eeaSAnirudh Venkataramanan
10407ec59eeaSAnirudh Venkataramanan /* Update the address values in the desc with the pa value
10417ec59eeaSAnirudh Venkataramanan * for respective buffer
10427ec59eeaSAnirudh Venkataramanan */
10437ec59eeaSAnirudh Venkataramanan desc_on_ring->params.generic.addr_high =
10447ec59eeaSAnirudh Venkataramanan cpu_to_le32(upper_32_bits(dma_buf->pa));
10457ec59eeaSAnirudh Venkataramanan desc_on_ring->params.generic.addr_low =
10467ec59eeaSAnirudh Venkataramanan cpu_to_le32(lower_32_bits(dma_buf->pa));
10477ec59eeaSAnirudh Venkataramanan }
10487ec59eeaSAnirudh Venkataramanan
10497ec59eeaSAnirudh Venkataramanan /* Debug desc and buffer */
10507ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
10519228d8b2SJacob Keller
10527ec59eeaSAnirudh Venkataramanan ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
1053faa01721SJacob Keller
10547ec59eeaSAnirudh Venkataramanan (cq->sq.next_to_use)++;
10557ec59eeaSAnirudh Venkataramanan if (cq->sq.next_to_use == cq->sq.count)
10567ec59eeaSAnirudh Venkataramanan cq->sq.next_to_use = 0;
10577ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.tail, cq->sq.next_to_use);
10587ec59eeaSAnirudh Venkataramanan ice_flush(hw);
10597ec59eeaSAnirudh Venkataramanan
1060*f86d6f9cSMichal Schmidt /* Wait a short time before initial ice_sq_done() check, to allow
10617ec59eeaSAnirudh Venkataramanan * hardware time for completion.
10627ec59eeaSAnirudh Venkataramanan */
10637ec59eeaSAnirudh Venkataramanan udelay(5);
10647ec59eeaSAnirudh Venkataramanan
1065*f86d6f9cSMichal Schmidt timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
1066*f86d6f9cSMichal Schmidt do {
1067*f86d6f9cSMichal Schmidt if (ice_sq_done(hw, cq))
10687ec59eeaSAnirudh Venkataramanan break;
10697ec59eeaSAnirudh Venkataramanan
10707ec59eeaSAnirudh Venkataramanan usleep_range(100, 150);
10717ec59eeaSAnirudh Venkataramanan } while (time_before(jiffies, timeout));
10727ec59eeaSAnirudh Venkataramanan
10737ec59eeaSAnirudh Venkataramanan /* if ready, copy the desc back to temp */
10747ec59eeaSAnirudh Venkataramanan if (ice_sq_done(hw, cq)) {
10757ec59eeaSAnirudh Venkataramanan memcpy(desc, desc_on_ring, sizeof(*desc));
10767ec59eeaSAnirudh Venkataramanan if (buf) {
10779228d8b2SJacob Keller /* get returned length to copy */
10787ec59eeaSAnirudh Venkataramanan u16 copy_size = le16_to_cpu(desc->datalen);
1079d54699e2STony Nguyen
10807ec59eeaSAnirudh Venkataramanan if (copy_size > buf_size) {
10817ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
10827ec59eeaSAnirudh Venkataramanan copy_size, buf_size);
10837ec59eeaSAnirudh Venkataramanan status = -EIO;
10847ec59eeaSAnirudh Venkataramanan } else {
10857ec59eeaSAnirudh Venkataramanan memcpy(buf, dma_buf->va, copy_size);
10869228d8b2SJacob Keller }
1087fb0254b2SJacob Keller }
10887ec59eeaSAnirudh Venkataramanan retval = le16_to_cpu(desc->retval);
10897ec59eeaSAnirudh Venkataramanan if (retval) {
10907ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
10917ec59eeaSAnirudh Venkataramanan le16_to_cpu(desc->opcode),
10927ec59eeaSAnirudh Venkataramanan retval);
10937ec59eeaSAnirudh Venkataramanan
10947ec59eeaSAnirudh Venkataramanan /* strip off FW internal code */
1095d54699e2STony Nguyen retval &= 0xff;
10967ec59eeaSAnirudh Venkataramanan }
10977ec59eeaSAnirudh Venkataramanan cmd_completed = true;
10987ec59eeaSAnirudh Venkataramanan if (!status && retval != ICE_AQ_RC_OK)
10999228d8b2SJacob Keller status = -EIO;
11007ec59eeaSAnirudh Venkataramanan cq->sq_last_status = (enum ice_aq_err)retval;
1101faa01721SJacob Keller }
11027ec59eeaSAnirudh Venkataramanan
11037ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
11047ec59eeaSAnirudh Venkataramanan
11057ec59eeaSAnirudh Venkataramanan ice_debug_cq(hw, (void *)desc, buf, buf_size);
11067ec59eeaSAnirudh Venkataramanan
11077ec59eeaSAnirudh Venkataramanan /* save writeback AQ if requested */
11087ec59eeaSAnirudh Venkataramanan if (details->wb_desc)
11097ec59eeaSAnirudh Venkataramanan memcpy(details->wb_desc, desc_on_ring,
1110b5c7f857SEvan Swanson sizeof(*details->wb_desc));
1111b5c7f857SEvan Swanson
1112b5c7f857SEvan Swanson /* update the error if time out occurred */
1113d54699e2STony Nguyen if (!cmd_completed) {
1114b5c7f857SEvan Swanson if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
11159228d8b2SJacob Keller rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1116d54699e2STony Nguyen ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
11177ec59eeaSAnirudh Venkataramanan status = -EIO;
1118b5c7f857SEvan Swanson } else {
11197ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
11207ec59eeaSAnirudh Venkataramanan status = -EIO;
11217ec59eeaSAnirudh Venkataramanan }
11227ec59eeaSAnirudh Venkataramanan }
11237ec59eeaSAnirudh Venkataramanan
11247ec59eeaSAnirudh Venkataramanan sq_send_command_error:
11257ec59eeaSAnirudh Venkataramanan mutex_unlock(&cq->sq_lock);
11267ec59eeaSAnirudh Venkataramanan return status;
11277ec59eeaSAnirudh Venkataramanan }
11287ec59eeaSAnirudh Venkataramanan
11297ec59eeaSAnirudh Venkataramanan /**
11307ec59eeaSAnirudh Venkataramanan * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
11317ec59eeaSAnirudh Venkataramanan * @desc: pointer to the temp descriptor (non DMA mem)
11327ec59eeaSAnirudh Venkataramanan * @opcode: the opcode can be used to decide which flags to turn off or on
11337ec59eeaSAnirudh Venkataramanan *
11347ec59eeaSAnirudh Venkataramanan * Fill the desc with default values
11357ec59eeaSAnirudh Venkataramanan */
ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc * desc,u16 opcode)11367ec59eeaSAnirudh Venkataramanan void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
11377ec59eeaSAnirudh Venkataramanan {
11387ec59eeaSAnirudh Venkataramanan /* zero out the desc */
1139940b61afSAnirudh Venkataramanan memset(desc, 0, sizeof(*desc));
1140940b61afSAnirudh Venkataramanan desc->opcode = cpu_to_le16(opcode);
1141940b61afSAnirudh Venkataramanan desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
1142f9867df6SAnirudh Venkataramanan }
1143940b61afSAnirudh Venkataramanan
/**
 * ice_clean_rq_elem - clean one element from the Control Receive Queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 *
 * Return: 0 on success, -EIO if the queue is uninitialized or the
 * descriptor carries ICE_AQ_FLAG_ERR, -EALREADY if there is nothing
 * to clean (ring head equals next_to_clean).
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = -EIO;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head (hardware's current write position) */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		/* report the FW error but still deliver the event below */
		ret_code = -EIO;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	/* copy the descriptor out; message payload is clamped to the
	 * caller-supplied buffer length
	 */
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index; the descriptor must be
	 * fully re-armed (above) before handing it back to hardware here
	 */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
1245