17ec59eeaSAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0 27ec59eeaSAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */ 37ec59eeaSAnirudh Venkataramanan 47ec59eeaSAnirudh Venkataramanan #include "ice_common.h" 57ec59eeaSAnirudh Venkataramanan 67ec59eeaSAnirudh Venkataramanan /** 77ec59eeaSAnirudh Venkataramanan * ice_adminq_init_regs - Initialize AdminQ registers 87ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 97ec59eeaSAnirudh Venkataramanan * 107ec59eeaSAnirudh Venkataramanan * This assumes the alloc_sq and alloc_rq functions have already been called 117ec59eeaSAnirudh Venkataramanan */ 127ec59eeaSAnirudh Venkataramanan static void ice_adminq_init_regs(struct ice_hw *hw) 137ec59eeaSAnirudh Venkataramanan { 147ec59eeaSAnirudh Venkataramanan struct ice_ctl_q_info *cq = &hw->adminq; 157ec59eeaSAnirudh Venkataramanan 167ec59eeaSAnirudh Venkataramanan cq->sq.head = PF_FW_ATQH; 177ec59eeaSAnirudh Venkataramanan cq->sq.tail = PF_FW_ATQT; 187ec59eeaSAnirudh Venkataramanan cq->sq.len = PF_FW_ATQLEN; 197ec59eeaSAnirudh Venkataramanan cq->sq.bah = PF_FW_ATQBAH; 207ec59eeaSAnirudh Venkataramanan cq->sq.bal = PF_FW_ATQBAL; 217ec59eeaSAnirudh Venkataramanan cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M; 227ec59eeaSAnirudh Venkataramanan cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; 237ec59eeaSAnirudh Venkataramanan cq->sq.head_mask = PF_FW_ATQH_ATQH_M; 247ec59eeaSAnirudh Venkataramanan 257ec59eeaSAnirudh Venkataramanan cq->rq.head = PF_FW_ARQH; 267ec59eeaSAnirudh Venkataramanan cq->rq.tail = PF_FW_ARQT; 277ec59eeaSAnirudh Venkataramanan cq->rq.len = PF_FW_ARQLEN; 287ec59eeaSAnirudh Venkataramanan cq->rq.bah = PF_FW_ARQBAH; 297ec59eeaSAnirudh Venkataramanan cq->rq.bal = PF_FW_ARQBAL; 307ec59eeaSAnirudh Venkataramanan cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M; 317ec59eeaSAnirudh Venkataramanan cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; 327ec59eeaSAnirudh Venkataramanan cq->rq.head_mask = PF_FW_ARQH_ARQH_M; 337ec59eeaSAnirudh 
Venkataramanan } 347ec59eeaSAnirudh Venkataramanan 357ec59eeaSAnirudh Venkataramanan /** 367ec59eeaSAnirudh Venkataramanan * ice_check_sq_alive 377ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hw struct 387ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 397ec59eeaSAnirudh Venkataramanan * 407ec59eeaSAnirudh Venkataramanan * Returns true if Queue is enabled else false. 417ec59eeaSAnirudh Venkataramanan */ 427ec59eeaSAnirudh Venkataramanan bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) 437ec59eeaSAnirudh Venkataramanan { 447ec59eeaSAnirudh Venkataramanan /* check both queue-length and queue-enable fields */ 457ec59eeaSAnirudh Venkataramanan if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) 467ec59eeaSAnirudh Venkataramanan return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | 477ec59eeaSAnirudh Venkataramanan cq->sq.len_ena_mask)) == 487ec59eeaSAnirudh Venkataramanan (cq->num_sq_entries | cq->sq.len_ena_mask); 497ec59eeaSAnirudh Venkataramanan 507ec59eeaSAnirudh Venkataramanan return false; 517ec59eeaSAnirudh Venkataramanan } 527ec59eeaSAnirudh Venkataramanan 537ec59eeaSAnirudh Venkataramanan /** 547ec59eeaSAnirudh Venkataramanan * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings 557ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 567ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 577ec59eeaSAnirudh Venkataramanan */ 587ec59eeaSAnirudh Venkataramanan static enum ice_status 597ec59eeaSAnirudh Venkataramanan ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) 607ec59eeaSAnirudh Venkataramanan { 617ec59eeaSAnirudh Venkataramanan size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); 627ec59eeaSAnirudh Venkataramanan 637ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size, 647ec59eeaSAnirudh Venkataramanan &cq->sq.desc_buf.pa, 657ec59eeaSAnirudh Venkataramanan GFP_KERNEL 
| __GFP_ZERO); 667ec59eeaSAnirudh Venkataramanan if (!cq->sq.desc_buf.va) 677ec59eeaSAnirudh Venkataramanan return ICE_ERR_NO_MEMORY; 687ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.size = size; 697ec59eeaSAnirudh Venkataramanan 707ec59eeaSAnirudh Venkataramanan cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, 717ec59eeaSAnirudh Venkataramanan sizeof(struct ice_sq_cd), GFP_KERNEL); 727ec59eeaSAnirudh Venkataramanan if (!cq->sq.cmd_buf) { 737ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, 747ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va, cq->sq.desc_buf.pa); 757ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va = NULL; 767ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.pa = 0; 777ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.size = 0; 787ec59eeaSAnirudh Venkataramanan return ICE_ERR_NO_MEMORY; 797ec59eeaSAnirudh Venkataramanan } 807ec59eeaSAnirudh Venkataramanan 817ec59eeaSAnirudh Venkataramanan return 0; 827ec59eeaSAnirudh Venkataramanan } 837ec59eeaSAnirudh Venkataramanan 847ec59eeaSAnirudh Venkataramanan /** 857ec59eeaSAnirudh Venkataramanan * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings 867ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 877ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 887ec59eeaSAnirudh Venkataramanan */ 897ec59eeaSAnirudh Venkataramanan static enum ice_status 907ec59eeaSAnirudh Venkataramanan ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) 917ec59eeaSAnirudh Venkataramanan { 927ec59eeaSAnirudh Venkataramanan size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); 937ec59eeaSAnirudh Venkataramanan 947ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size, 957ec59eeaSAnirudh Venkataramanan &cq->rq.desc_buf.pa, 967ec59eeaSAnirudh Venkataramanan GFP_KERNEL | __GFP_ZERO); 977ec59eeaSAnirudh Venkataramanan if (!cq->rq.desc_buf.va) 
987ec59eeaSAnirudh Venkataramanan return ICE_ERR_NO_MEMORY; 997ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.size = size; 1007ec59eeaSAnirudh Venkataramanan return 0; 1017ec59eeaSAnirudh Venkataramanan } 1027ec59eeaSAnirudh Venkataramanan 1037ec59eeaSAnirudh Venkataramanan /** 1047ec59eeaSAnirudh Venkataramanan * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings 1057ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 1067ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 1077ec59eeaSAnirudh Venkataramanan * 1087ec59eeaSAnirudh Venkataramanan * This assumes the posted send buffers have already been cleaned 1097ec59eeaSAnirudh Venkataramanan * and de-allocated 1107ec59eeaSAnirudh Venkataramanan */ 1117ec59eeaSAnirudh Venkataramanan static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) 1127ec59eeaSAnirudh Venkataramanan { 1137ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, 1147ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va, cq->sq.desc_buf.pa); 1157ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.va = NULL; 1167ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.pa = 0; 1177ec59eeaSAnirudh Venkataramanan cq->sq.desc_buf.size = 0; 1187ec59eeaSAnirudh Venkataramanan } 1197ec59eeaSAnirudh Venkataramanan 1207ec59eeaSAnirudh Venkataramanan /** 1217ec59eeaSAnirudh Venkataramanan * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings 1227ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 1237ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 1247ec59eeaSAnirudh Venkataramanan * 1257ec59eeaSAnirudh Venkataramanan * This assumes the posted receive buffers have already been cleaned 1267ec59eeaSAnirudh Venkataramanan * and de-allocated 1277ec59eeaSAnirudh Venkataramanan */ 1287ec59eeaSAnirudh Venkataramanan static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) 
1297ec59eeaSAnirudh Venkataramanan { 1307ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size, 1317ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.va, cq->rq.desc_buf.pa); 1327ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.va = NULL; 1337ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.pa = 0; 1347ec59eeaSAnirudh Venkataramanan cq->rq.desc_buf.size = 0; 1357ec59eeaSAnirudh Venkataramanan } 1367ec59eeaSAnirudh Venkataramanan 1377ec59eeaSAnirudh Venkataramanan /** 1387ec59eeaSAnirudh Venkataramanan * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ 1397ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 1407ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 1417ec59eeaSAnirudh Venkataramanan */ 1427ec59eeaSAnirudh Venkataramanan static enum ice_status 1437ec59eeaSAnirudh Venkataramanan ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) 1447ec59eeaSAnirudh Venkataramanan { 1457ec59eeaSAnirudh Venkataramanan int i; 1467ec59eeaSAnirudh Venkataramanan 1477ec59eeaSAnirudh Venkataramanan /* We'll be allocating the buffer info memory first, then we can 1487ec59eeaSAnirudh Venkataramanan * allocate the mapped buffers for the event processing 1497ec59eeaSAnirudh Venkataramanan */ 1507ec59eeaSAnirudh Venkataramanan cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, 1517ec59eeaSAnirudh Venkataramanan sizeof(cq->rq.desc_buf), GFP_KERNEL); 1527ec59eeaSAnirudh Venkataramanan if (!cq->rq.dma_head) 1537ec59eeaSAnirudh Venkataramanan return ICE_ERR_NO_MEMORY; 1547ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; 1557ec59eeaSAnirudh Venkataramanan 1567ec59eeaSAnirudh Venkataramanan /* allocate the mapped buffers */ 1577ec59eeaSAnirudh Venkataramanan for (i = 0; i < cq->num_rq_entries; i++) { 1587ec59eeaSAnirudh Venkataramanan struct ice_aq_desc *desc; 1597ec59eeaSAnirudh Venkataramanan struct ice_dma_mem *bi; 1607ec59eeaSAnirudh 
Venkataramanan 1617ec59eeaSAnirudh Venkataramanan bi = &cq->rq.r.rq_bi[i]; 1627ec59eeaSAnirudh Venkataramanan bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw), 1637ec59eeaSAnirudh Venkataramanan cq->rq_buf_size, &bi->pa, 1647ec59eeaSAnirudh Venkataramanan GFP_KERNEL | __GFP_ZERO); 1657ec59eeaSAnirudh Venkataramanan if (!bi->va) 1667ec59eeaSAnirudh Venkataramanan goto unwind_alloc_rq_bufs; 1677ec59eeaSAnirudh Venkataramanan bi->size = cq->rq_buf_size; 1687ec59eeaSAnirudh Venkataramanan 1697ec59eeaSAnirudh Venkataramanan /* now configure the descriptors for use */ 1707ec59eeaSAnirudh Venkataramanan desc = ICE_CTL_Q_DESC(cq->rq, i); 1717ec59eeaSAnirudh Venkataramanan 1727ec59eeaSAnirudh Venkataramanan desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF); 1737ec59eeaSAnirudh Venkataramanan if (cq->rq_buf_size > ICE_AQ_LG_BUF) 1747ec59eeaSAnirudh Venkataramanan desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB); 1757ec59eeaSAnirudh Venkataramanan desc->opcode = 0; 1767ec59eeaSAnirudh Venkataramanan /* This is in accordance with Admin queue design, there is no 1777ec59eeaSAnirudh Venkataramanan * register for buffer size configuration 1787ec59eeaSAnirudh Venkataramanan */ 1797ec59eeaSAnirudh Venkataramanan desc->datalen = cpu_to_le16(bi->size); 1807ec59eeaSAnirudh Venkataramanan desc->retval = 0; 1817ec59eeaSAnirudh Venkataramanan desc->cookie_high = 0; 1827ec59eeaSAnirudh Venkataramanan desc->cookie_low = 0; 1837ec59eeaSAnirudh Venkataramanan desc->params.generic.addr_high = 1847ec59eeaSAnirudh Venkataramanan cpu_to_le32(upper_32_bits(bi->pa)); 1857ec59eeaSAnirudh Venkataramanan desc->params.generic.addr_low = 1867ec59eeaSAnirudh Venkataramanan cpu_to_le32(lower_32_bits(bi->pa)); 1877ec59eeaSAnirudh Venkataramanan desc->params.generic.param0 = 0; 1887ec59eeaSAnirudh Venkataramanan desc->params.generic.param1 = 0; 1897ec59eeaSAnirudh Venkataramanan } 1907ec59eeaSAnirudh Venkataramanan return 0; 1917ec59eeaSAnirudh Venkataramanan 1927ec59eeaSAnirudh Venkataramanan unwind_alloc_rq_bufs: 
1937ec59eeaSAnirudh Venkataramanan /* don't try to free the one that failed... */ 1947ec59eeaSAnirudh Venkataramanan i--; 1957ec59eeaSAnirudh Venkataramanan for (; i >= 0; i--) { 1967ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size, 1977ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); 1987ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].va = NULL; 1997ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].pa = 0; 2007ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].size = 0; 2017ec59eeaSAnirudh Venkataramanan } 2027ec59eeaSAnirudh Venkataramanan devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); 2037ec59eeaSAnirudh Venkataramanan 2047ec59eeaSAnirudh Venkataramanan return ICE_ERR_NO_MEMORY; 2057ec59eeaSAnirudh Venkataramanan } 2067ec59eeaSAnirudh Venkataramanan 2077ec59eeaSAnirudh Venkataramanan /** 2087ec59eeaSAnirudh Venkataramanan * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ 2097ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 2107ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 2117ec59eeaSAnirudh Venkataramanan */ 2127ec59eeaSAnirudh Venkataramanan static enum ice_status 2137ec59eeaSAnirudh Venkataramanan ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) 2147ec59eeaSAnirudh Venkataramanan { 2157ec59eeaSAnirudh Venkataramanan int i; 2167ec59eeaSAnirudh Venkataramanan 2177ec59eeaSAnirudh Venkataramanan /* No mapped memory needed yet, just the buffer info structures */ 2187ec59eeaSAnirudh Venkataramanan cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, 2197ec59eeaSAnirudh Venkataramanan sizeof(cq->sq.desc_buf), GFP_KERNEL); 2207ec59eeaSAnirudh Venkataramanan if (!cq->sq.dma_head) 2217ec59eeaSAnirudh Venkataramanan return ICE_ERR_NO_MEMORY; 2227ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; 2237ec59eeaSAnirudh Venkataramanan 2247ec59eeaSAnirudh Venkataramanan /* 
allocate the mapped buffers */ 2257ec59eeaSAnirudh Venkataramanan for (i = 0; i < cq->num_sq_entries; i++) { 2267ec59eeaSAnirudh Venkataramanan struct ice_dma_mem *bi; 2277ec59eeaSAnirudh Venkataramanan 2287ec59eeaSAnirudh Venkataramanan bi = &cq->sq.r.sq_bi[i]; 2297ec59eeaSAnirudh Venkataramanan bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw), 2307ec59eeaSAnirudh Venkataramanan cq->sq_buf_size, &bi->pa, 2317ec59eeaSAnirudh Venkataramanan GFP_KERNEL | __GFP_ZERO); 2327ec59eeaSAnirudh Venkataramanan if (!bi->va) 2337ec59eeaSAnirudh Venkataramanan goto unwind_alloc_sq_bufs; 2347ec59eeaSAnirudh Venkataramanan bi->size = cq->sq_buf_size; 2357ec59eeaSAnirudh Venkataramanan } 2367ec59eeaSAnirudh Venkataramanan return 0; 2377ec59eeaSAnirudh Venkataramanan 2387ec59eeaSAnirudh Venkataramanan unwind_alloc_sq_bufs: 2397ec59eeaSAnirudh Venkataramanan /* don't try to free the one that failed... */ 2407ec59eeaSAnirudh Venkataramanan i--; 2417ec59eeaSAnirudh Venkataramanan for (; i >= 0; i--) { 2427ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size, 2437ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa); 2447ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].va = NULL; 2457ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].pa = 0; 2467ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].size = 0; 2477ec59eeaSAnirudh Venkataramanan } 2487ec59eeaSAnirudh Venkataramanan devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); 2497ec59eeaSAnirudh Venkataramanan 2507ec59eeaSAnirudh Venkataramanan return ICE_ERR_NO_MEMORY; 2517ec59eeaSAnirudh Venkataramanan } 2527ec59eeaSAnirudh Venkataramanan 2537ec59eeaSAnirudh Venkataramanan /** 2547ec59eeaSAnirudh Venkataramanan * ice_free_rq_bufs - Free ARQ buffer info elements 2557ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 2567ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 2577ec59eeaSAnirudh Venkataramanan */ 2587ec59eeaSAnirudh 
Venkataramanan static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) 2597ec59eeaSAnirudh Venkataramanan { 2607ec59eeaSAnirudh Venkataramanan int i; 2617ec59eeaSAnirudh Venkataramanan 2627ec59eeaSAnirudh Venkataramanan /* free descriptors */ 2637ec59eeaSAnirudh Venkataramanan for (i = 0; i < cq->num_rq_entries; i++) { 2647ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size, 2657ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); 2667ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].va = NULL; 2677ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].pa = 0; 2687ec59eeaSAnirudh Venkataramanan cq->rq.r.rq_bi[i].size = 0; 2697ec59eeaSAnirudh Venkataramanan } 2707ec59eeaSAnirudh Venkataramanan 2717ec59eeaSAnirudh Venkataramanan /* free the dma header */ 2727ec59eeaSAnirudh Venkataramanan devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); 2737ec59eeaSAnirudh Venkataramanan } 2747ec59eeaSAnirudh Venkataramanan 2757ec59eeaSAnirudh Venkataramanan /** 2767ec59eeaSAnirudh Venkataramanan * ice_free_sq_bufs - Free ATQ buffer info elements 2777ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 2787ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 2797ec59eeaSAnirudh Venkataramanan */ 2807ec59eeaSAnirudh Venkataramanan static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) 2817ec59eeaSAnirudh Venkataramanan { 2827ec59eeaSAnirudh Venkataramanan int i; 2837ec59eeaSAnirudh Venkataramanan 2847ec59eeaSAnirudh Venkataramanan /* only unmap if the address is non-NULL */ 2857ec59eeaSAnirudh Venkataramanan for (i = 0; i < cq->num_sq_entries; i++) 2867ec59eeaSAnirudh Venkataramanan if (cq->sq.r.sq_bi[i].pa) { 2877ec59eeaSAnirudh Venkataramanan dmam_free_coherent(ice_hw_to_dev(hw), 2887ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].size, 2897ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].va, 2907ec59eeaSAnirudh Venkataramanan 
cq->sq.r.sq_bi[i].pa); 2917ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].va = NULL; 2927ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].pa = 0; 2937ec59eeaSAnirudh Venkataramanan cq->sq.r.sq_bi[i].size = 0; 2947ec59eeaSAnirudh Venkataramanan } 2957ec59eeaSAnirudh Venkataramanan 2967ec59eeaSAnirudh Venkataramanan /* free the buffer info list */ 2977ec59eeaSAnirudh Venkataramanan devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf); 2987ec59eeaSAnirudh Venkataramanan 2997ec59eeaSAnirudh Venkataramanan /* free the dma header */ 3007ec59eeaSAnirudh Venkataramanan devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); 3017ec59eeaSAnirudh Venkataramanan } 3027ec59eeaSAnirudh Venkataramanan 3037ec59eeaSAnirudh Venkataramanan /** 3047ec59eeaSAnirudh Venkataramanan * ice_cfg_sq_regs - configure Control ATQ registers 3057ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 3067ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 3077ec59eeaSAnirudh Venkataramanan * 3087ec59eeaSAnirudh Venkataramanan * Configure base address and length registers for the transmit queue 3097ec59eeaSAnirudh Venkataramanan */ 3107ec59eeaSAnirudh Venkataramanan static enum ice_status 3117ec59eeaSAnirudh Venkataramanan ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) 3127ec59eeaSAnirudh Venkataramanan { 3137ec59eeaSAnirudh Venkataramanan u32 reg = 0; 3147ec59eeaSAnirudh Venkataramanan 3157ec59eeaSAnirudh Venkataramanan /* Clear Head and Tail */ 3167ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.head, 0); 3177ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.tail, 0); 3187ec59eeaSAnirudh Venkataramanan 3197ec59eeaSAnirudh Venkataramanan /* set starting point */ 3207ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask)); 3217ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa)); 3227ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa)); 
3237ec59eeaSAnirudh Venkataramanan 3247ec59eeaSAnirudh Venkataramanan /* Check one register to verify that config was applied */ 3257ec59eeaSAnirudh Venkataramanan reg = rd32(hw, cq->sq.bal); 3267ec59eeaSAnirudh Venkataramanan if (reg != lower_32_bits(cq->sq.desc_buf.pa)) 3277ec59eeaSAnirudh Venkataramanan return ICE_ERR_AQ_ERROR; 3287ec59eeaSAnirudh Venkataramanan 3297ec59eeaSAnirudh Venkataramanan return 0; 3307ec59eeaSAnirudh Venkataramanan } 3317ec59eeaSAnirudh Venkataramanan 3327ec59eeaSAnirudh Venkataramanan /** 3337ec59eeaSAnirudh Venkataramanan * ice_cfg_rq_regs - configure Control ARQ register 3347ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 3357ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 3367ec59eeaSAnirudh Venkataramanan * 3377ec59eeaSAnirudh Venkataramanan * Configure base address and length registers for the receive (event q) 3387ec59eeaSAnirudh Venkataramanan */ 3397ec59eeaSAnirudh Venkataramanan static enum ice_status 3407ec59eeaSAnirudh Venkataramanan ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) 3417ec59eeaSAnirudh Venkataramanan { 3427ec59eeaSAnirudh Venkataramanan u32 reg = 0; 3437ec59eeaSAnirudh Venkataramanan 3447ec59eeaSAnirudh Venkataramanan /* Clear Head and Tail */ 3457ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.head, 0); 3467ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.tail, 0); 3477ec59eeaSAnirudh Venkataramanan 3487ec59eeaSAnirudh Venkataramanan /* set starting point */ 3497ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask)); 3507ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa)); 3517ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa)); 3527ec59eeaSAnirudh Venkataramanan 3537ec59eeaSAnirudh Venkataramanan /* Update tail in the HW to post pre-allocated buffers */ 3547ec59eeaSAnirudh Venkataramanan wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 
1)); 3557ec59eeaSAnirudh Venkataramanan 3567ec59eeaSAnirudh Venkataramanan /* Check one register to verify that config was applied */ 3577ec59eeaSAnirudh Venkataramanan reg = rd32(hw, cq->rq.bal); 3587ec59eeaSAnirudh Venkataramanan if (reg != lower_32_bits(cq->rq.desc_buf.pa)) 3597ec59eeaSAnirudh Venkataramanan return ICE_ERR_AQ_ERROR; 3607ec59eeaSAnirudh Venkataramanan 3617ec59eeaSAnirudh Venkataramanan return 0; 3627ec59eeaSAnirudh Venkataramanan } 3637ec59eeaSAnirudh Venkataramanan 3647ec59eeaSAnirudh Venkataramanan /** 3657ec59eeaSAnirudh Venkataramanan * ice_init_sq - main initialization routine for Control ATQ 3667ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 3677ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 3687ec59eeaSAnirudh Venkataramanan * 3697ec59eeaSAnirudh Venkataramanan * This is the main initialization routine for the Control Send Queue 3707ec59eeaSAnirudh Venkataramanan * Prior to calling this function, drivers *MUST* set the following fields 3717ec59eeaSAnirudh Venkataramanan * in the cq->structure: 3727ec59eeaSAnirudh Venkataramanan * - cq->num_sq_entries 3737ec59eeaSAnirudh Venkataramanan * - cq->sq_buf_size 3747ec59eeaSAnirudh Venkataramanan * 3757ec59eeaSAnirudh Venkataramanan * Do *NOT* hold the lock when calling this as the memory allocation routines 3767ec59eeaSAnirudh Venkataramanan * called are not going to be atomic context safe 3777ec59eeaSAnirudh Venkataramanan */ 3787ec59eeaSAnirudh Venkataramanan static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) 3797ec59eeaSAnirudh Venkataramanan { 3807ec59eeaSAnirudh Venkataramanan enum ice_status ret_code; 3817ec59eeaSAnirudh Venkataramanan 3827ec59eeaSAnirudh Venkataramanan if (cq->sq.count > 0) { 3837ec59eeaSAnirudh Venkataramanan /* queue already initialized */ 3847ec59eeaSAnirudh Venkataramanan ret_code = ICE_ERR_NOT_READY; 3857ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 3867ec59eeaSAnirudh 
Venkataramanan } 3877ec59eeaSAnirudh Venkataramanan 3887ec59eeaSAnirudh Venkataramanan /* verify input for valid configuration */ 3897ec59eeaSAnirudh Venkataramanan if (!cq->num_sq_entries || !cq->sq_buf_size) { 3907ec59eeaSAnirudh Venkataramanan ret_code = ICE_ERR_CFG; 3917ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 3927ec59eeaSAnirudh Venkataramanan } 3937ec59eeaSAnirudh Venkataramanan 3947ec59eeaSAnirudh Venkataramanan cq->sq.next_to_use = 0; 3957ec59eeaSAnirudh Venkataramanan cq->sq.next_to_clean = 0; 3967ec59eeaSAnirudh Venkataramanan 3977ec59eeaSAnirudh Venkataramanan /* allocate the ring memory */ 3987ec59eeaSAnirudh Venkataramanan ret_code = ice_alloc_ctrlq_sq_ring(hw, cq); 3997ec59eeaSAnirudh Venkataramanan if (ret_code) 4007ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 4017ec59eeaSAnirudh Venkataramanan 4027ec59eeaSAnirudh Venkataramanan /* allocate buffers in the rings */ 4037ec59eeaSAnirudh Venkataramanan ret_code = ice_alloc_sq_bufs(hw, cq); 4047ec59eeaSAnirudh Venkataramanan if (ret_code) 4057ec59eeaSAnirudh Venkataramanan goto init_ctrlq_free_rings; 4067ec59eeaSAnirudh Venkataramanan 4077ec59eeaSAnirudh Venkataramanan /* initialize base registers */ 4087ec59eeaSAnirudh Venkataramanan ret_code = ice_cfg_sq_regs(hw, cq); 4097ec59eeaSAnirudh Venkataramanan if (ret_code) 4107ec59eeaSAnirudh Venkataramanan goto init_ctrlq_free_rings; 4117ec59eeaSAnirudh Venkataramanan 4127ec59eeaSAnirudh Venkataramanan /* success! 
*/ 4137ec59eeaSAnirudh Venkataramanan cq->sq.count = cq->num_sq_entries; 4147ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 4157ec59eeaSAnirudh Venkataramanan 4167ec59eeaSAnirudh Venkataramanan init_ctrlq_free_rings: 4177ec59eeaSAnirudh Venkataramanan ice_free_ctrlq_sq_ring(hw, cq); 4187ec59eeaSAnirudh Venkataramanan 4197ec59eeaSAnirudh Venkataramanan init_ctrlq_exit: 4207ec59eeaSAnirudh Venkataramanan return ret_code; 4217ec59eeaSAnirudh Venkataramanan } 4227ec59eeaSAnirudh Venkataramanan 4237ec59eeaSAnirudh Venkataramanan /** 4247ec59eeaSAnirudh Venkataramanan * ice_init_rq - initialize ARQ 4257ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 4267ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 4277ec59eeaSAnirudh Venkataramanan * 4287ec59eeaSAnirudh Venkataramanan * The main initialization routine for the Admin Receive (Event) Queue. 4297ec59eeaSAnirudh Venkataramanan * Prior to calling this function, drivers *MUST* set the following fields 4307ec59eeaSAnirudh Venkataramanan * in the cq->structure: 4317ec59eeaSAnirudh Venkataramanan * - cq->num_rq_entries 4327ec59eeaSAnirudh Venkataramanan * - cq->rq_buf_size 4337ec59eeaSAnirudh Venkataramanan * 4347ec59eeaSAnirudh Venkataramanan * Do *NOT* hold the lock when calling this as the memory allocation routines 4357ec59eeaSAnirudh Venkataramanan * called are not going to be atomic context safe 4367ec59eeaSAnirudh Venkataramanan */ 4377ec59eeaSAnirudh Venkataramanan static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) 4387ec59eeaSAnirudh Venkataramanan { 4397ec59eeaSAnirudh Venkataramanan enum ice_status ret_code; 4407ec59eeaSAnirudh Venkataramanan 4417ec59eeaSAnirudh Venkataramanan if (cq->rq.count > 0) { 4427ec59eeaSAnirudh Venkataramanan /* queue already initialized */ 4437ec59eeaSAnirudh Venkataramanan ret_code = ICE_ERR_NOT_READY; 4447ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 4457ec59eeaSAnirudh Venkataramanan } 
4467ec59eeaSAnirudh Venkataramanan 4477ec59eeaSAnirudh Venkataramanan /* verify input for valid configuration */ 4487ec59eeaSAnirudh Venkataramanan if (!cq->num_rq_entries || !cq->rq_buf_size) { 4497ec59eeaSAnirudh Venkataramanan ret_code = ICE_ERR_CFG; 4507ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 4517ec59eeaSAnirudh Venkataramanan } 4527ec59eeaSAnirudh Venkataramanan 4537ec59eeaSAnirudh Venkataramanan cq->rq.next_to_use = 0; 4547ec59eeaSAnirudh Venkataramanan cq->rq.next_to_clean = 0; 4557ec59eeaSAnirudh Venkataramanan 4567ec59eeaSAnirudh Venkataramanan /* allocate the ring memory */ 4577ec59eeaSAnirudh Venkataramanan ret_code = ice_alloc_ctrlq_rq_ring(hw, cq); 4587ec59eeaSAnirudh Venkataramanan if (ret_code) 4597ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 4607ec59eeaSAnirudh Venkataramanan 4617ec59eeaSAnirudh Venkataramanan /* allocate buffers in the rings */ 4627ec59eeaSAnirudh Venkataramanan ret_code = ice_alloc_rq_bufs(hw, cq); 4637ec59eeaSAnirudh Venkataramanan if (ret_code) 4647ec59eeaSAnirudh Venkataramanan goto init_ctrlq_free_rings; 4657ec59eeaSAnirudh Venkataramanan 4667ec59eeaSAnirudh Venkataramanan /* initialize base registers */ 4677ec59eeaSAnirudh Venkataramanan ret_code = ice_cfg_rq_regs(hw, cq); 4687ec59eeaSAnirudh Venkataramanan if (ret_code) 4697ec59eeaSAnirudh Venkataramanan goto init_ctrlq_free_rings; 4707ec59eeaSAnirudh Venkataramanan 4717ec59eeaSAnirudh Venkataramanan /* success! 
*/ 4727ec59eeaSAnirudh Venkataramanan cq->rq.count = cq->num_rq_entries; 4737ec59eeaSAnirudh Venkataramanan goto init_ctrlq_exit; 4747ec59eeaSAnirudh Venkataramanan 4757ec59eeaSAnirudh Venkataramanan init_ctrlq_free_rings: 4767ec59eeaSAnirudh Venkataramanan ice_free_ctrlq_rq_ring(hw, cq); 4777ec59eeaSAnirudh Venkataramanan 4787ec59eeaSAnirudh Venkataramanan init_ctrlq_exit: 4797ec59eeaSAnirudh Venkataramanan return ret_code; 4807ec59eeaSAnirudh Venkataramanan } 4817ec59eeaSAnirudh Venkataramanan 4827ec59eeaSAnirudh Venkataramanan /** 4837ec59eeaSAnirudh Venkataramanan * ice_shutdown_sq - shutdown the Control ATQ 4847ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 4857ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 4867ec59eeaSAnirudh Venkataramanan * 4877ec59eeaSAnirudh Venkataramanan * The main shutdown routine for the Control Transmit Queue 4887ec59eeaSAnirudh Venkataramanan */ 4897ec59eeaSAnirudh Venkataramanan static enum ice_status 4907ec59eeaSAnirudh Venkataramanan ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) 4917ec59eeaSAnirudh Venkataramanan { 4927ec59eeaSAnirudh Venkataramanan enum ice_status ret_code = 0; 4937ec59eeaSAnirudh Venkataramanan 4947ec59eeaSAnirudh Venkataramanan mutex_lock(&cq->sq_lock); 4957ec59eeaSAnirudh Venkataramanan 4967ec59eeaSAnirudh Venkataramanan if (!cq->sq.count) { 4977ec59eeaSAnirudh Venkataramanan ret_code = ICE_ERR_NOT_READY; 4987ec59eeaSAnirudh Venkataramanan goto shutdown_sq_out; 4997ec59eeaSAnirudh Venkataramanan } 5007ec59eeaSAnirudh Venkataramanan 5017ec59eeaSAnirudh Venkataramanan /* Stop firmware AdminQ processing */ 5027ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.head, 0); 5037ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.tail, 0); 5047ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.len, 0); 5057ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.bal, 0); 5067ec59eeaSAnirudh Venkataramanan wr32(hw, cq->sq.bah, 0); 5077ec59eeaSAnirudh Venkataramanan 
5087ec59eeaSAnirudh Venkataramanan cq->sq.count = 0; /* to indicate uninitialized queue */ 5097ec59eeaSAnirudh Venkataramanan 5107ec59eeaSAnirudh Venkataramanan /* free ring buffers and the ring itself */ 5117ec59eeaSAnirudh Venkataramanan ice_free_sq_bufs(hw, cq); 5127ec59eeaSAnirudh Venkataramanan ice_free_ctrlq_sq_ring(hw, cq); 5137ec59eeaSAnirudh Venkataramanan 5147ec59eeaSAnirudh Venkataramanan shutdown_sq_out: 5157ec59eeaSAnirudh Venkataramanan mutex_unlock(&cq->sq_lock); 5167ec59eeaSAnirudh Venkataramanan return ret_code; 5177ec59eeaSAnirudh Venkataramanan } 5187ec59eeaSAnirudh Venkataramanan 5197ec59eeaSAnirudh Venkataramanan /** 5207ec59eeaSAnirudh Venkataramanan * ice_aq_ver_check - Check the reported AQ API version. 5217ec59eeaSAnirudh Venkataramanan * @fw_branch: The "branch" of FW, typically describes the device type 5227ec59eeaSAnirudh Venkataramanan * @fw_major: The major version of the FW API 5237ec59eeaSAnirudh Venkataramanan * @fw_minor: The minor version increment of the FW API 5247ec59eeaSAnirudh Venkataramanan * 5257ec59eeaSAnirudh Venkataramanan * Checks if the driver should load on a given AQ API version. 5267ec59eeaSAnirudh Venkataramanan * 5277ec59eeaSAnirudh Venkataramanan * Return: 'true' iff the driver should attempt to load. 'false' otherwise. 
 */
static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
{
	/* Only load on the exact FW API version this driver was built
	 * against; any mismatch in branch, major, or minor is a refusal.
	 */
	if (fw_branch != EXP_FW_API_VER_BRANCH)
		return false;
	if (fw_major != EXP_FW_API_VER_MAJOR)
		return false;
	if (fw_minor != EXP_FW_API_VER_MINOR)
		return false;
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue.
 *
 * Takes cq->rq_lock internally, so the caller must not already hold it.
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	/* rq.count == 0 marks an uninitialized/already-torn-down queue */
	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ice_free_rq_bufs(hw, cq);
	ice_free_ctrlq_rq_ring(hw, cq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if its alive
 * @hw: pointer to the hardware structure
 *
 * Queries FW for its version and verifies the reported AQ API version is
 * one this driver supports. On any failure the admin queues are torn down
 * (only those that were actually initialized, as indicated by a non-zero
 * head register offset) before the error is returned.
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
			      hw->api_min_ver)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	/* head holds the register offset assigned at init; zero means the
	 * queue registers were never set up, so skip its teardown
	 */
	if (cq->rq.head) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
	if (cq->sq.head) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		goto init_ctrlq_destroy_locks;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	/* ice_init_check_adminq tears the queues back down on failure */
	return ice_init_check_adminq(hw);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control
queue 6967ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 6977ec59eeaSAnirudh Venkataramanan * @q_type: specific Control queue type 6987ec59eeaSAnirudh Venkataramanan */ 6997ec59eeaSAnirudh Venkataramanan static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) 7007ec59eeaSAnirudh Venkataramanan { 7017ec59eeaSAnirudh Venkataramanan struct ice_ctl_q_info *cq; 7027ec59eeaSAnirudh Venkataramanan 7037ec59eeaSAnirudh Venkataramanan switch (q_type) { 7047ec59eeaSAnirudh Venkataramanan case ICE_CTL_Q_ADMIN: 7057ec59eeaSAnirudh Venkataramanan cq = &hw->adminq; 7067ec59eeaSAnirudh Venkataramanan if (ice_check_sq_alive(hw, cq)) 7077ec59eeaSAnirudh Venkataramanan ice_aq_q_shutdown(hw, true); 7087ec59eeaSAnirudh Venkataramanan break; 7097ec59eeaSAnirudh Venkataramanan default: 7107ec59eeaSAnirudh Venkataramanan return; 7117ec59eeaSAnirudh Venkataramanan } 7127ec59eeaSAnirudh Venkataramanan 713b29bc220SPreethi Banala if (cq->sq.head) { 7147ec59eeaSAnirudh Venkataramanan ice_shutdown_sq(hw, cq); 7157ec59eeaSAnirudh Venkataramanan mutex_destroy(&cq->sq_lock); 716b29bc220SPreethi Banala } 717b29bc220SPreethi Banala if (cq->rq.head) { 718b29bc220SPreethi Banala ice_shutdown_rq(hw, cq); 7197ec59eeaSAnirudh Venkataramanan mutex_destroy(&cq->rq_lock); 7207ec59eeaSAnirudh Venkataramanan } 721b29bc220SPreethi Banala } 7227ec59eeaSAnirudh Venkataramanan 7237ec59eeaSAnirudh Venkataramanan /** 7247ec59eeaSAnirudh Venkataramanan * ice_shutdown_all_ctrlq - shutdown routine for all control queues 7257ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 7267ec59eeaSAnirudh Venkataramanan */ 7277ec59eeaSAnirudh Venkataramanan void ice_shutdown_all_ctrlq(struct ice_hw *hw) 7287ec59eeaSAnirudh Venkataramanan { 7297ec59eeaSAnirudh Venkataramanan /* Shutdown FW admin queue */ 7307ec59eeaSAnirudh Venkataramanan ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); 7317ec59eeaSAnirudh Venkataramanan } 7327ec59eeaSAnirudh Venkataramanan 7337ec59eeaSAnirudh 
Venkataramanan /** 7347ec59eeaSAnirudh Venkataramanan * ice_clean_sq - cleans Admin send queue (ATQ) 7357ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hardware structure 7367ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 7377ec59eeaSAnirudh Venkataramanan * 7387ec59eeaSAnirudh Venkataramanan * returns the number of free desc 7397ec59eeaSAnirudh Venkataramanan */ 7407ec59eeaSAnirudh Venkataramanan static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) 7417ec59eeaSAnirudh Venkataramanan { 7427ec59eeaSAnirudh Venkataramanan struct ice_ctl_q_ring *sq = &cq->sq; 7437ec59eeaSAnirudh Venkataramanan u16 ntc = sq->next_to_clean; 7447ec59eeaSAnirudh Venkataramanan struct ice_sq_cd *details; 7457ec59eeaSAnirudh Venkataramanan struct ice_aq_desc *desc; 7467ec59eeaSAnirudh Venkataramanan 7477ec59eeaSAnirudh Venkataramanan desc = ICE_CTL_Q_DESC(*sq, ntc); 7487ec59eeaSAnirudh Venkataramanan details = ICE_CTL_Q_DETAILS(*sq, ntc); 7497ec59eeaSAnirudh Venkataramanan 7507ec59eeaSAnirudh Venkataramanan while (rd32(hw, cq->sq.head) != ntc) { 7517ec59eeaSAnirudh Venkataramanan ice_debug(hw, ICE_DBG_AQ_MSG, 7527ec59eeaSAnirudh Venkataramanan "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); 7537ec59eeaSAnirudh Venkataramanan memset(desc, 0, sizeof(*desc)); 7547ec59eeaSAnirudh Venkataramanan memset(details, 0, sizeof(*details)); 7557ec59eeaSAnirudh Venkataramanan ntc++; 7567ec59eeaSAnirudh Venkataramanan if (ntc == sq->count) 7577ec59eeaSAnirudh Venkataramanan ntc = 0; 7587ec59eeaSAnirudh Venkataramanan desc = ICE_CTL_Q_DESC(*sq, ntc); 7597ec59eeaSAnirudh Venkataramanan details = ICE_CTL_Q_DETAILS(*sq, ntc); 7607ec59eeaSAnirudh Venkataramanan } 7617ec59eeaSAnirudh Venkataramanan 7627ec59eeaSAnirudh Venkataramanan sq->next_to_clean = ntc; 7637ec59eeaSAnirudh Venkataramanan 7647ec59eeaSAnirudh Venkataramanan return ICE_CTL_Q_DESC_UNUSED(sq); 7657ec59eeaSAnirudh Venkataramanan } 7667ec59eeaSAnirudh Venkataramanan 7677ec59eeaSAnirudh 
Venkataramanan /** 7687ec59eeaSAnirudh Venkataramanan * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) 7697ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hw struct 7707ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 7717ec59eeaSAnirudh Venkataramanan * 7727ec59eeaSAnirudh Venkataramanan * Returns true if the firmware has processed all descriptors on the 7737ec59eeaSAnirudh Venkataramanan * admin send queue. Returns false if there are still requests pending. 7747ec59eeaSAnirudh Venkataramanan */ 7757ec59eeaSAnirudh Venkataramanan static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) 7767ec59eeaSAnirudh Venkataramanan { 7777ec59eeaSAnirudh Venkataramanan /* AQ designers suggest use of head for better 7787ec59eeaSAnirudh Venkataramanan * timing reliability than DD bit 7797ec59eeaSAnirudh Venkataramanan */ 7807ec59eeaSAnirudh Venkataramanan return rd32(hw, cq->sq.head) == cq->sq.next_to_use; 7817ec59eeaSAnirudh Venkataramanan } 7827ec59eeaSAnirudh Venkataramanan 7837ec59eeaSAnirudh Venkataramanan /** 7847ec59eeaSAnirudh Venkataramanan * ice_sq_send_cmd - send command to Control Queue (ATQ) 7857ec59eeaSAnirudh Venkataramanan * @hw: pointer to the hw struct 7867ec59eeaSAnirudh Venkataramanan * @cq: pointer to the specific Control queue 7877ec59eeaSAnirudh Venkataramanan * @desc: prefilled descriptor describing the command (non DMA mem) 7887ec59eeaSAnirudh Venkataramanan * @buf: buffer to use for indirect commands (or NULL for direct commands) 7897ec59eeaSAnirudh Venkataramanan * @buf_size: size of buffer for indirect commands (or 0 for direct commands) 7907ec59eeaSAnirudh Venkataramanan * @cd: pointer to command details structure 7917ec59eeaSAnirudh Venkataramanan * 7927ec59eeaSAnirudh Venkataramanan * This is the main send command routine for the ATQ. It runs the q, 7937ec59eeaSAnirudh Venkataramanan * cleans the queue, etc. 
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	/* queue must have been set up by ice_init_sq before sending */
	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* buf and buf_size must be both set or both unset */
	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	/* sanity-check the HW head index before trusting it */
	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		memcpy(details, cd, sizeof(*details));
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	/* bump next_to_use (with wrap-around) and write it to the tail
	 * register -- this is what hands the descriptor to firmware
	 */
	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	/* poll for completion: 1 ms per iteration, up to sq_cmd_timeout
	 * iterations total
	 */
	do {
		if (ice_sq_done(hw, cq))
			break;

		mdelay(1);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		/* report the error but still hand the event to the caller */
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	/* copy at most the caller-provided buffer length */
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}