12025cf9eSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2f91090f7SAviad Krawczyk /*
3f91090f7SAviad Krawczyk * Huawei HiNIC PCI Express Linux driver
4f91090f7SAviad Krawczyk * Copyright(c) 2017 Huawei Technologies Co., Ltd
5f91090f7SAviad Krawczyk */
6f91090f7SAviad Krawczyk
7f91090f7SAviad Krawczyk #include <linux/kernel.h>
8f91090f7SAviad Krawczyk #include <linux/types.h>
9f91090f7SAviad Krawczyk #include <linux/pci.h>
10f91090f7SAviad Krawczyk #include <linux/device.h>
11f91090f7SAviad Krawczyk #include <linux/dma-mapping.h>
12f91090f7SAviad Krawczyk #include <linux/vmalloc.h>
13f91090f7SAviad Krawczyk #include <linux/errno.h>
14f91090f7SAviad Krawczyk #include <linux/sizes.h>
1553e7d6feSAviad Krawczyk #include <linux/atomic.h>
16e2585ea7SAviad Krawczyk #include <linux/skbuff.h>
1700e57a6dSAviad Krawczyk #include <linux/io.h>
18e2585ea7SAviad Krawczyk #include <asm/barrier.h>
1953e7d6feSAviad Krawczyk #include <asm/byteorder.h>
20f91090f7SAviad Krawczyk
2153e7d6feSAviad Krawczyk #include "hinic_common.h"
22f91090f7SAviad Krawczyk #include "hinic_hw_if.h"
23e2585ea7SAviad Krawczyk #include "hinic_hw_wqe.h"
24f91090f7SAviad Krawczyk #include "hinic_hw_wq.h"
2553e7d6feSAviad Krawczyk #include "hinic_hw_qp_ctxt.h"
26f91090f7SAviad Krawczyk #include "hinic_hw_qp.h"
2700e57a6dSAviad Krawczyk #include "hinic_hw_io.h"
28f91090f7SAviad Krawczyk
/* offset of the SQ doorbell area inside the doorbell page */
#define SQ_DB_OFF               SZ_2K

/* The number of cache lines to prefetch until the threshold state */
#define WQ_PREFETCH_MAX         2
/* The number of cache lines to prefetch after the threshold state */
#define WQ_PREFETCH_MIN         1
/* Threshold state */
#define WQ_PREFETCH_THRESHOLD   256

/* sizes of the SQ/RQ ctxt */
#define Q_CTXT_SIZE             48
#define CTXT_RSVD               240

/* byte offset of the ctxt of SQ q_id inside the qp ctxt area */
#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id)  \
		(((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)

/* byte offset of the ctxt of RQ q_id (RQ ctxts follow all the SQ ctxts) */
#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id)  \
		(((max_rqs) + (max_sqs)) * CTXT_RSVD + \
		 ((max_sqs) + (q_id)) * Q_CTXT_SIZE)

/* sizes expressed in 16/8 byte units, rounded up */
#define SIZE_16BYTES(size)      (ALIGN((size), 16) >> 4)
#define SIZE_8BYTES(size)       (ALIGN((size), 8) >> 3)
#define SECT_SIZE_FROM_8BYTES(size)     ((size) << 3)

/* the pi is split: high byte goes in the db data, low byte in the db addr */
#define SQ_DB_PI_HI_SHIFT       8
#define SQ_DB_PI_HI(prod_idx)   ((prod_idx) >> SQ_DB_PI_HI_SHIFT)

#define SQ_DB_PI_LOW_MASK       0xFF
#define SQ_DB_PI_LOW(prod_idx)  ((prod_idx) & SQ_DB_PI_LOW_MASK)

#define SQ_DB_ADDR(sq, pi)      ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))

#define SQ_MASKED_IDX(sq, idx)  ((idx) & (sq)->wq->mask)
#define RQ_MASKED_IDX(rq, idx)  ((idx) & (rq)->wq->mask)

enum sq_wqe_type {
	SQ_NORMAL_WQE = 0,
};

enum rq_completion_fmt {
	RQ_COMPLETE_SGE = 1
};
7153e7d6feSAviad Krawczyk
/**
 * hinic_qp_prepare_header - prepare a qp ctxt header
 * @qp_ctxt_hdr: the qp ctxt header to prepare
 * @ctxt_type: type of the ctxts described by the command (SQ or RQ)
 * @num_queues: number of queues in the command
 * @max_queues: max number of queues supported
 *
 * The header is converted to big endian, ready to be sent to the HW.
 **/
void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
			     enum hinic_qp_ctxt_type ctxt_type,
			     u16 num_queues, u16 max_queues)
{
	u16 max_sqs = max_queues;
	u16 max_rqs = max_queues;

	qp_ctxt_hdr->num_queues = num_queues;
	qp_ctxt_hdr->queue_type = ctxt_type;

	/* offset of the first ctxt of this type inside the qp ctxt area */
	qp_ctxt_hdr->addr_offset = (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) ?
				   SQ_CTXT_OFFSET(max_sqs, max_rqs, 0) :
				   RQ_CTXT_OFFSET(max_sqs, max_rqs, 0);

	/* the HW takes the offset in 16 byte units */
	qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);

	hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
}
9153e7d6feSAviad Krawczyk
/**
 * hinic_sq_prepare_ctxt - prepare the HW ctxt of an SQ
 * @sq_ctxt: the SQ ctxt structure to fill
 * @sq: the SQ whose current state is captured
 * @global_qid: the global queue id of the SQ
 *
 * Snapshots the SQ's work queue state (pi/ci, page pfns, prefetch policy)
 * into @sq_ctxt and converts it to big endian for the HW.
 **/
void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
			   struct hinic_sq *sq, u16 global_qid)
{
	u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
	u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
	u16 pi_start, ci_start;
	struct hinic_wq *wq;

	wq = sq->wq;
	ci_start = atomic_read(&wq->cons_idx);
	pi_start = atomic_read(&wq->prod_idx);

	/* Read the first page paddr from the WQ page paddr ptrs */
	wq_page_addr = be64_to_cpu(*wq->block_vaddr);

	wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

	/* If only one page, use 0-level CLA */
	if (wq->num_q_pages == 1)
		wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr);
	else
		wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);

	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);

	/* ceq attributes: bind the ctxt to its global SQ id */
	sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid,
						       GLOBAL_SQ_ID) |
			    HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN);

	/* consumer index starts at ci_start with the wrapped bit set */
	sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) |
			      HINIC_SQ_CTXT_CI_SET(1, WRAPPED);

	/* high half of the first page pfn shares a word with the pi */
	sq_ctxt->wq_hi_pfn_pi =
		HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
		HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI);

	sq_ctxt->wq_lo_pfn = wq_page_pfn_lo;

	/* wqe prefetch policy for the HW (see WQ_PREFETCH_* above) */
	sq_ctxt->pref_cache =
		HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
		HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
		HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);

	sq_ctxt->pref_wrapped = 1;

	sq_ctxt->pref_wq_hi_pfn_ci =
		HINIC_SQ_CTXT_PREF_SET(ci_start, CI) |
		HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN);

	sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;

	sq_ctxt->wq_block_hi_pfn =
		HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);

	sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;

	/* the HW expects the ctxt in big endian */
	hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
}
15353e7d6feSAviad Krawczyk
/**
 * hinic_rq_prepare_ctxt - prepare the HW ctxt of an RQ
 * @rq_ctxt: the RQ ctxt structure to fill
 * @rq: the RQ whose current state is captured
 * @global_qid: the global queue id of the RQ
 *
 * Snapshots the RQ's work queue state (pi/ci, page pfns, prefetch policy,
 * pi dma address) into @rq_ctxt and converts it to big endian for the HW.
 **/
void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
			   struct hinic_rq *rq, u16 global_qid)
{
	u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
	u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
	u16 pi_start, ci_start;
	struct hinic_wq *wq;

	wq = rq->wq;
	ci_start = atomic_read(&wq->cons_idx);
	pi_start = atomic_read(&wq->prod_idx);

	/* Read the first page paddr from the WQ page paddr ptrs */
	wq_page_addr = be64_to_cpu(*wq->block_vaddr);

	wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

	wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);

	/* ceq attributes: wrapped bit starts set */
	rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) |
			    HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED);

	/* producer index shares a word with the msix entry of the RQ */
	rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) |
				HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);

	rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi,
							  HI_PFN) |
				HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI);

	rq_ctxt->wq_lo_pfn = wq_page_pfn_lo;

	/* wqe prefetch policy for the HW (see WQ_PREFETCH_* above) */
	rq_ctxt->pref_cache =
		HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
		HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
		HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);

	rq_ctxt->pref_wrapped = 1;

	rq_ctxt->pref_wq_hi_pfn_ci =
		HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) |
		HINIC_RQ_CTXT_PREF_SET(ci_start, CI);

	rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;

	/* dma address where the HW reports the current pi (see hinic_init_rq) */
	rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
	rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);

	rq_ctxt->wq_block_hi_pfn =
		HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);

	rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;

	/* the HW expects the ctxt in big endian */
	hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
}
21253e7d6feSAviad Krawczyk
213f91090f7SAviad Krawczyk /**
214f91090f7SAviad Krawczyk * alloc_sq_skb_arr - allocate sq array for saved skb
215f91090f7SAviad Krawczyk * @sq: HW Send Queue
216f91090f7SAviad Krawczyk *
217f91090f7SAviad Krawczyk * Return 0 - Success, negative - Failure
218f91090f7SAviad Krawczyk **/
alloc_sq_skb_arr(struct hinic_sq * sq)219f91090f7SAviad Krawczyk static int alloc_sq_skb_arr(struct hinic_sq *sq)
220f91090f7SAviad Krawczyk {
221f91090f7SAviad Krawczyk struct hinic_wq *wq = sq->wq;
222f91090f7SAviad Krawczyk size_t skb_arr_size;
223f91090f7SAviad Krawczyk
224f91090f7SAviad Krawczyk skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
225f91090f7SAviad Krawczyk sq->saved_skb = vzalloc(skb_arr_size);
226f91090f7SAviad Krawczyk if (!sq->saved_skb)
227f91090f7SAviad Krawczyk return -ENOMEM;
228f91090f7SAviad Krawczyk
229f91090f7SAviad Krawczyk return 0;
230f91090f7SAviad Krawczyk }
231f91090f7SAviad Krawczyk
/**
 * free_sq_skb_arr - free sq array for saved skb
 * @sq: HW Send Queue
 **/
static void free_sq_skb_arr(struct hinic_sq *sq)
{
	/* vfree(NULL) is a no-op, so this is safe on a partly set up sq */
	vfree(sq->saved_skb);
}
240f91090f7SAviad Krawczyk
241f91090f7SAviad Krawczyk /**
242f91090f7SAviad Krawczyk * alloc_rq_skb_arr - allocate rq array for saved skb
243f91090f7SAviad Krawczyk * @rq: HW Receive Queue
244f91090f7SAviad Krawczyk *
245f91090f7SAviad Krawczyk * Return 0 - Success, negative - Failure
246f91090f7SAviad Krawczyk **/
alloc_rq_skb_arr(struct hinic_rq * rq)247f91090f7SAviad Krawczyk static int alloc_rq_skb_arr(struct hinic_rq *rq)
248f91090f7SAviad Krawczyk {
249f91090f7SAviad Krawczyk struct hinic_wq *wq = rq->wq;
250f91090f7SAviad Krawczyk size_t skb_arr_size;
251f91090f7SAviad Krawczyk
252f91090f7SAviad Krawczyk skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
253f91090f7SAviad Krawczyk rq->saved_skb = vzalloc(skb_arr_size);
254f91090f7SAviad Krawczyk if (!rq->saved_skb)
255f91090f7SAviad Krawczyk return -ENOMEM;
256f91090f7SAviad Krawczyk
257f91090f7SAviad Krawczyk return 0;
258f91090f7SAviad Krawczyk }
259f91090f7SAviad Krawczyk
/**
 * free_rq_skb_arr - free rq array for saved skb
 * @rq: HW Receive Queue
 **/
static void free_rq_skb_arr(struct hinic_rq *rq)
{
	/* vfree(NULL) is a no-op, so this is safe on a partly set up rq */
	vfree(rq->saved_skb);
}
268f91090f7SAviad Krawczyk
/**
 * hinic_init_sq - Initialize HW Send Queue
 * @sq: HW Send Queue
 * @hwif: HW Interface for accessing HW
 * @wq: Work Queue for the data of the SQ
 * @entry: msix entry for sq
 * @ci_addr: address for reading the current HW consumer index
 * @ci_dma_addr: dma address for reading the current HW consumer index
 * @db_base: doorbell base address
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
		  struct hinic_wq *wq, struct msix_entry *entry,
		  void *ci_addr, dma_addr_t ci_dma_addr,
		  void __iomem *db_base)
{
	sq->hwif = hwif;

	sq->wq = wq;

	sq->irq = entry->vector;
	sq->msix_entry = entry->entry;

	sq->hw_ci_addr = ci_addr;
	sq->hw_ci_dma_addr = ci_dma_addr;

	/* the SQ doorbell area lives SQ_DB_OFF bytes into the db page */
	sq->db_base = db_base + SQ_DB_OFF;

	return alloc_sq_skb_arr(sq);
}
300f91090f7SAviad Krawczyk
/**
 * hinic_clean_sq - Clean HW Send Queue's Resources
 * @sq: Send Queue
 *
 * Frees the saved-skb array allocated by hinic_init_sq().
 **/
void hinic_clean_sq(struct hinic_sq *sq)
{
	free_sq_skb_arr(sq);
}
309f91090f7SAviad Krawczyk
310f91090f7SAviad Krawczyk /**
311f91090f7SAviad Krawczyk * alloc_rq_cqe - allocate rq completion queue elements
312f91090f7SAviad Krawczyk * @rq: HW Receive Queue
313f91090f7SAviad Krawczyk *
314f91090f7SAviad Krawczyk * Return 0 - Success, negative - Failure
315f91090f7SAviad Krawczyk **/
alloc_rq_cqe(struct hinic_rq * rq)316f91090f7SAviad Krawczyk static int alloc_rq_cqe(struct hinic_rq *rq)
317f91090f7SAviad Krawczyk {
318f91090f7SAviad Krawczyk struct hinic_hwif *hwif = rq->hwif;
319f91090f7SAviad Krawczyk struct pci_dev *pdev = hwif->pdev;
320f91090f7SAviad Krawczyk size_t cqe_dma_size, cqe_size;
321f91090f7SAviad Krawczyk struct hinic_wq *wq = rq->wq;
322f91090f7SAviad Krawczyk int j, i;
323f91090f7SAviad Krawczyk
324f91090f7SAviad Krawczyk cqe_size = wq->q_depth * sizeof(*rq->cqe);
325f91090f7SAviad Krawczyk rq->cqe = vzalloc(cqe_size);
326f91090f7SAviad Krawczyk if (!rq->cqe)
327f91090f7SAviad Krawczyk return -ENOMEM;
328f91090f7SAviad Krawczyk
329f91090f7SAviad Krawczyk cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
330f91090f7SAviad Krawczyk rq->cqe_dma = vzalloc(cqe_dma_size);
331f91090f7SAviad Krawczyk if (!rq->cqe_dma)
332f91090f7SAviad Krawczyk goto err_cqe_dma_arr_alloc;
333f91090f7SAviad Krawczyk
334f91090f7SAviad Krawczyk for (i = 0; i < wq->q_depth; i++) {
335750afb08SLuis Chamberlain rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
336f91090f7SAviad Krawczyk sizeof(*rq->cqe[i]),
337f91090f7SAviad Krawczyk &rq->cqe_dma[i], GFP_KERNEL);
338f91090f7SAviad Krawczyk if (!rq->cqe[i])
339f91090f7SAviad Krawczyk goto err_cqe_alloc;
340f91090f7SAviad Krawczyk }
341f91090f7SAviad Krawczyk
342f91090f7SAviad Krawczyk return 0;
343f91090f7SAviad Krawczyk
344f91090f7SAviad Krawczyk err_cqe_alloc:
345f91090f7SAviad Krawczyk for (j = 0; j < i; j++)
346f91090f7SAviad Krawczyk dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
347f91090f7SAviad Krawczyk rq->cqe_dma[j]);
348f91090f7SAviad Krawczyk
349f91090f7SAviad Krawczyk vfree(rq->cqe_dma);
350f91090f7SAviad Krawczyk
351f91090f7SAviad Krawczyk err_cqe_dma_arr_alloc:
352f91090f7SAviad Krawczyk vfree(rq->cqe);
353f91090f7SAviad Krawczyk return -ENOMEM;
354f91090f7SAviad Krawczyk }
355f91090f7SAviad Krawczyk
356f91090f7SAviad Krawczyk /**
357f91090f7SAviad Krawczyk * free_rq_cqe - free rq completion queue elements
358f91090f7SAviad Krawczyk * @rq: HW Receive Queue
359f91090f7SAviad Krawczyk **/
free_rq_cqe(struct hinic_rq * rq)360f91090f7SAviad Krawczyk static void free_rq_cqe(struct hinic_rq *rq)
361f91090f7SAviad Krawczyk {
362f91090f7SAviad Krawczyk struct hinic_hwif *hwif = rq->hwif;
363f91090f7SAviad Krawczyk struct pci_dev *pdev = hwif->pdev;
364f91090f7SAviad Krawczyk struct hinic_wq *wq = rq->wq;
365f91090f7SAviad Krawczyk int i;
366f91090f7SAviad Krawczyk
367f91090f7SAviad Krawczyk for (i = 0; i < wq->q_depth; i++)
368f91090f7SAviad Krawczyk dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
369f91090f7SAviad Krawczyk rq->cqe_dma[i]);
370f91090f7SAviad Krawczyk
371f91090f7SAviad Krawczyk vfree(rq->cqe_dma);
372f91090f7SAviad Krawczyk vfree(rq->cqe);
373f91090f7SAviad Krawczyk }
374f91090f7SAviad Krawczyk
/**
 * hinic_init_rq - Initialize HW Receive Queue
 * @rq: HW Receive Queue
 * @hwif: HW Interface for accessing HW
 * @wq: Work Queue for the data of the RQ
 * @entry: msix entry for rq
 *
 * Allocates the saved-skb array, the cqe buffers and a small coherent
 * buffer (pi_virt_addr) through which the HW reports the producer index.
 * On failure everything allocated so far is released again.
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
		  struct hinic_wq *wq, struct msix_entry *entry)
{
	struct pci_dev *pdev = hwif->pdev;
	size_t pi_size;
	int err;

	rq->hwif = hwif;

	rq->wq = wq;

	rq->irq = entry->vector;
	rq->msix_entry = entry->entry;

	rq->buf_sz = HINIC_RX_BUF_SZ;

	err = alloc_rq_skb_arr(rq);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
		return err;
	}

	err = alloc_rq_cqe(rq);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
		goto err_alloc_rq_cqe;
	}

	/* HW requirements: Must be at least 32 bit */
	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
	rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
					      &rq->pi_dma_addr, GFP_KERNEL);
	if (!rq->pi_virt_addr) {
		err = -ENOMEM;
		goto err_pi_virt;
	}

	return 0;

err_pi_virt:
	free_rq_cqe(rq);

err_alloc_rq_cqe:
	free_rq_skb_arr(rq);
	return err;
}
430f91090f7SAviad Krawczyk
/**
 * hinic_clean_rq - Clean HW Receive Queue's Resources
 * @rq: HW Receive Queue
 *
 * Releases everything allocated by hinic_init_rq(), in reverse order.
 **/
void hinic_clean_rq(struct hinic_rq *rq)
{
	struct hinic_hwif *hwif = rq->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t pi_size;

	/* must match the size used for the allocation in hinic_init_rq() */
	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
	dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
			  rq->pi_dma_addr);

	free_rq_cqe(rq);
	free_rq_skb_arr(rq);
}
448e2585ea7SAviad Krawczyk
449e2585ea7SAviad Krawczyk /**
45000e57a6dSAviad Krawczyk * hinic_get_sq_free_wqebbs - return number of free wqebbs for use
45100e57a6dSAviad Krawczyk * @sq: send queue
45200e57a6dSAviad Krawczyk *
45300e57a6dSAviad Krawczyk * Return number of free wqebbs
45400e57a6dSAviad Krawczyk **/
hinic_get_sq_free_wqebbs(struct hinic_sq * sq)45500e57a6dSAviad Krawczyk int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
45600e57a6dSAviad Krawczyk {
45700e57a6dSAviad Krawczyk struct hinic_wq *wq = sq->wq;
45800e57a6dSAviad Krawczyk
45900e57a6dSAviad Krawczyk return atomic_read(&wq->delta) - 1;
46000e57a6dSAviad Krawczyk }
46100e57a6dSAviad Krawczyk
46200e57a6dSAviad Krawczyk /**
463e2585ea7SAviad Krawczyk * hinic_get_rq_free_wqebbs - return number of free wqebbs for use
464e2585ea7SAviad Krawczyk * @rq: recv queue
465e2585ea7SAviad Krawczyk *
466e2585ea7SAviad Krawczyk * Return number of free wqebbs
467e2585ea7SAviad Krawczyk **/
hinic_get_rq_free_wqebbs(struct hinic_rq * rq)468e2585ea7SAviad Krawczyk int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
469e2585ea7SAviad Krawczyk {
470e2585ea7SAviad Krawczyk struct hinic_wq *wq = rq->wq;
471e2585ea7SAviad Krawczyk
472e2585ea7SAviad Krawczyk return atomic_read(&wq->delta) - 1;
473e2585ea7SAviad Krawczyk }
474e2585ea7SAviad Krawczyk
/* sq_prepare_ctrl - fill the ctrl section of an SQ wqe with the section
 * lengths (in 8 byte units) and the default queue info word
 */
static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
{
	u32 ctrl_size, task_size, bufdesc_size;

	/* section lengths are given to the HW in 8 byte units */
	ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
	task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
	bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc);
	bufdesc_size = SIZE_8BYTES(bufdesc_size);

	ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
			  HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) |
			  HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
			  HINIC_SQ_CTRL_SET(ctrl_size, LEN);

	/* default MSS and UC flag; the offload setters may rewrite the MSS */
	ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
					     QUEUE_INFO_MSS) |
			   HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
}
49300e57a6dSAviad Krawczyk
sq_prepare_task(struct hinic_sq_task * task)49400e57a6dSAviad Krawczyk static void sq_prepare_task(struct hinic_sq_task *task)
49500e57a6dSAviad Krawczyk {
496cc18a754SZhao Chen task->pkt_info0 = 0;
497cc18a754SZhao Chen task->pkt_info1 = 0;
498cc18a754SZhao Chen task->pkt_info2 = 0;
49900e57a6dSAviad Krawczyk
50000e57a6dSAviad Krawczyk task->ufo_v6_identify = 0;
50100e57a6dSAviad Krawczyk
50200e57a6dSAviad Krawczyk task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
50300e57a6dSAviad Krawczyk
50400e57a6dSAviad Krawczyk task->zero_pad = 0;
50500e57a6dSAviad Krawczyk }
50600e57a6dSAviad Krawczyk
/**
 * hinic_task_set_l2hdr - set the l2 header length in the wqe task section
 * @task: wqe task section to update
 * @len: l2 header length
 **/
void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
{
	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
}
511cc18a754SZhao Chen
/**
 * hinic_task_set_outter_l3 - set the outer l3 info in the wqe task section
 * @task: wqe task section to update
 * @l3_type: type of the outer l3 header
 * @network_len: length of the outer network header
 **/
void hinic_task_set_outter_l3(struct hinic_sq_task *task,
			      enum hinic_l3_offload_type l3_type,
			      u32 network_len)
{
	task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
			   HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
}
519cc18a754SZhao Chen
/**
 * hinic_task_set_inner_l3 - set the inner l3 info in the wqe task section
 * @task: wqe task section to update
 * @l3_type: type of the inner l3 header
 * @network_len: length of the inner network header
 **/
void hinic_task_set_inner_l3(struct hinic_sq_task *task,
			     enum hinic_l3_offload_type l3_type,
			     u32 network_len)
{
	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
}
527cc18a754SZhao Chen
/**
 * hinic_task_set_tunnel_l4 - set the tunnel l4 info in the wqe task section
 * @task: wqe task section to update
 * @l4_type: type of the tunnel l4 header
 * @tunnel_len: length of the tunnel header
 **/
void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
			      enum hinic_l4_tunnel_type l4_type,
			      u32 tunnel_len)
{
	task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
			   HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
}
535cc18a754SZhao Chen
/**
 * hinic_set_cs_inner_l4 - set the l4 checksum offload info of an SQ wqe
 * @task: wqe task section to update
 * @queue_info: queue info word of the wqe ctrl section to update
 * @l4_offload: l4 offload type
 * @l4_len: inner l4 header length
 * @offset: payload offset
 *
 * Programs the default MSS into @queue_info, replacing any previous value.
 **/
void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
			   enum hinic_l4_offload_type l4_offload,
			   u32 l4_len, u32 offset)
{
	u32 tcp_udp_en = 0, sctp_en = 0;

	switch (l4_offload) {
	case TCP_OFFLOAD_ENABLE:
	case UDP_OFFLOAD_ENABLE:
		tcp_udp_en = 1;
		break;
	case SCTP_OFFLOAD_ENABLE:
		sctp_en = 1;
		break;
	default:
		break;
	}

	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);

	*queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
		       HINIC_SQ_CTRL_SET(tcp_udp_en, QUEUE_INFO_TCPUDP_CS) |
		       HINIC_SQ_CTRL_SET(sctp_en, QUEUE_INFO_SCTP);

	/* replace any previously programmed MSS with the default */
	*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
	*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT, QUEUE_INFO_MSS);
}
559cc18a754SZhao Chen
/**
 * hinic_set_tso_inner_l4 - set the tso/ufo offload info of an SQ wqe
 * @task: wqe task section to update
 * @queue_info: queue info word of the wqe ctrl section to update
 * @l4_offload: l4 offload type
 * @l4_len: inner l4 header length
 * @offset: payload offset
 * @ip_ident: ip identification value
 * @mss: MSS value to program
 **/
void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
			    enum hinic_l4_offload_type l4_offload,
			    u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
{
	u32 tso_en = (l4_offload == TCP_OFFLOAD_ENABLE) ? 1 : 0;
	u32 ufo_en = (l4_offload == UDP_OFFLOAD_ENABLE) ? 1 : 0;

	task->ufo_v6_identify = ip_ident;

	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso_en || ufo_en, TSO_FLAG);
	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);

	*queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
		       HINIC_SQ_CTRL_SET(tso_en, QUEUE_INFO_TSO) |
		       HINIC_SQ_CTRL_SET(ufo_en, QUEUE_INFO_UFO) |
		       HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);

	/* set MSS value, replacing any previously programmed one */
	*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
	*queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
}
586cc18a754SZhao Chen
58700e57a6dSAviad Krawczyk /**
58800e57a6dSAviad Krawczyk * hinic_sq_prepare_wqe - prepare wqe before insert to the queue
58900e57a6dSAviad Krawczyk * @sq: send queue
59000e57a6dSAviad Krawczyk * @sq_wqe: wqe to prepare
59100e57a6dSAviad Krawczyk * @sges: sges for use by the wqe for send for buf addresses
59200e57a6dSAviad Krawczyk * @nr_sges: number of sges
59300e57a6dSAviad Krawczyk **/
hinic_sq_prepare_wqe(struct hinic_sq * sq,struct hinic_sq_wqe * sq_wqe,struct hinic_sge * sges,int nr_sges)594*c706df6dSZhengchao Shao void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
595*c706df6dSZhengchao Shao struct hinic_sge *sges, int nr_sges)
59600e57a6dSAviad Krawczyk {
59700e57a6dSAviad Krawczyk int i;
59800e57a6dSAviad Krawczyk
599*c706df6dSZhengchao Shao sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
60000e57a6dSAviad Krawczyk
60100e57a6dSAviad Krawczyk sq_prepare_task(&sq_wqe->task);
60200e57a6dSAviad Krawczyk
60300e57a6dSAviad Krawczyk for (i = 0; i < nr_sges; i++)
60400e57a6dSAviad Krawczyk sq_wqe->buf_descs[i].sge = sges[i];
60500e57a6dSAviad Krawczyk }
60600e57a6dSAviad Krawczyk
60700e57a6dSAviad Krawczyk /**
60800e57a6dSAviad Krawczyk * sq_prepare_db - prepare doorbell to write
60900e57a6dSAviad Krawczyk * @sq: send queue
61000e57a6dSAviad Krawczyk * @prod_idx: pi value for the doorbell
61100e57a6dSAviad Krawczyk * @cos: cos of the doorbell
61200e57a6dSAviad Krawczyk *
61300e57a6dSAviad Krawczyk * Return db value
61400e57a6dSAviad Krawczyk **/
sq_prepare_db(struct hinic_sq * sq,u16 prod_idx,unsigned int cos)61500e57a6dSAviad Krawczyk static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
61600e57a6dSAviad Krawczyk {
61700e57a6dSAviad Krawczyk struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
61800e57a6dSAviad Krawczyk u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));
61900e57a6dSAviad Krawczyk
62000e57a6dSAviad Krawczyk /* Data should be written to HW in Big Endian Format */
62100e57a6dSAviad Krawczyk return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) |
62200e57a6dSAviad Krawczyk HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) |
62300e57a6dSAviad Krawczyk HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) |
62400e57a6dSAviad Krawczyk HINIC_SQ_DB_INFO_SET(cos, COS) |
62500e57a6dSAviad Krawczyk HINIC_SQ_DB_INFO_SET(qp->q_id, QID));
62600e57a6dSAviad Krawczyk }
62700e57a6dSAviad Krawczyk
62800e57a6dSAviad Krawczyk /**
62900e57a6dSAviad Krawczyk * hinic_sq_write_db- write doorbell
63000e57a6dSAviad Krawczyk * @sq: send queue
63100e57a6dSAviad Krawczyk * @prod_idx: pi value for the doorbell
63200e57a6dSAviad Krawczyk * @wqe_size: wqe size
63300e57a6dSAviad Krawczyk * @cos: cos of the wqe
63400e57a6dSAviad Krawczyk **/
hinic_sq_write_db(struct hinic_sq * sq,u16 prod_idx,unsigned int wqe_size,unsigned int cos)63500e57a6dSAviad Krawczyk void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
63600e57a6dSAviad Krawczyk unsigned int cos)
63700e57a6dSAviad Krawczyk {
63800e57a6dSAviad Krawczyk struct hinic_wq *wq = sq->wq;
63900e57a6dSAviad Krawczyk
64000e57a6dSAviad Krawczyk /* increment prod_idx to the next */
64100e57a6dSAviad Krawczyk prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
642bcab6782SLuo bin prod_idx = SQ_MASKED_IDX(sq, prod_idx);
64300e57a6dSAviad Krawczyk
64400e57a6dSAviad Krawczyk wmb(); /* Write all before the doorbell */
64500e57a6dSAviad Krawczyk
64600e57a6dSAviad Krawczyk writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx));
64700e57a6dSAviad Krawczyk }
64800e57a6dSAviad Krawczyk
64900e57a6dSAviad Krawczyk /**
65000e57a6dSAviad Krawczyk * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi
65100e57a6dSAviad Krawczyk * @sq: sq to get wqe from
65200e57a6dSAviad Krawczyk * @wqe_size: wqe size
65300e57a6dSAviad Krawczyk * @prod_idx: returned pi
65400e57a6dSAviad Krawczyk *
65500e57a6dSAviad Krawczyk * Return wqe pointer
65600e57a6dSAviad Krawczyk **/
hinic_sq_get_wqe(struct hinic_sq * sq,unsigned int wqe_size,u16 * prod_idx)65700e57a6dSAviad Krawczyk struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
65800e57a6dSAviad Krawczyk unsigned int wqe_size, u16 *prod_idx)
65900e57a6dSAviad Krawczyk {
66000e57a6dSAviad Krawczyk struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
66100e57a6dSAviad Krawczyk prod_idx);
66200e57a6dSAviad Krawczyk
66300e57a6dSAviad Krawczyk if (IS_ERR(hw_wqe))
66400e57a6dSAviad Krawczyk return NULL;
66500e57a6dSAviad Krawczyk
66600e57a6dSAviad Krawczyk return &hw_wqe->sq_wqe;
66700e57a6dSAviad Krawczyk }
66800e57a6dSAviad Krawczyk
66900e57a6dSAviad Krawczyk /**
670cc18a754SZhao Chen * hinic_sq_return_wqe - return the wqe to the sq
671cc18a754SZhao Chen * @sq: send queue
672cc18a754SZhao Chen * @wqe_size: the size of the wqe
673cc18a754SZhao Chen **/
hinic_sq_return_wqe(struct hinic_sq * sq,unsigned int wqe_size)674cc18a754SZhao Chen void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
675cc18a754SZhao Chen {
676cc18a754SZhao Chen hinic_return_wqe(sq->wq, wqe_size);
677cc18a754SZhao Chen }
678cc18a754SZhao Chen
679cc18a754SZhao Chen /**
68000e57a6dSAviad Krawczyk * hinic_sq_write_wqe - write the wqe to the sq
68100e57a6dSAviad Krawczyk * @sq: send queue
68200e57a6dSAviad Krawczyk * @prod_idx: pi of the wqe
68300e57a6dSAviad Krawczyk * @sq_wqe: the wqe to write
68400e57a6dSAviad Krawczyk * @skb: skb to save
68500e57a6dSAviad Krawczyk * @wqe_size: the size of the wqe
68600e57a6dSAviad Krawczyk **/
hinic_sq_write_wqe(struct hinic_sq * sq,u16 prod_idx,struct hinic_sq_wqe * sq_wqe,struct sk_buff * skb,unsigned int wqe_size)68700e57a6dSAviad Krawczyk void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
68800e57a6dSAviad Krawczyk struct hinic_sq_wqe *sq_wqe,
68900e57a6dSAviad Krawczyk struct sk_buff *skb, unsigned int wqe_size)
69000e57a6dSAviad Krawczyk {
69100e57a6dSAviad Krawczyk struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe;
69200e57a6dSAviad Krawczyk
69300e57a6dSAviad Krawczyk sq->saved_skb[prod_idx] = skb;
69400e57a6dSAviad Krawczyk
69500e57a6dSAviad Krawczyk /* The data in the HW should be in Big Endian Format */
69600e57a6dSAviad Krawczyk hinic_cpu_to_be32(sq_wqe, wqe_size);
69700e57a6dSAviad Krawczyk
69800e57a6dSAviad Krawczyk hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
69900e57a6dSAviad Krawczyk }
70000e57a6dSAviad Krawczyk
70100e57a6dSAviad Krawczyk /**
7029c2956d2SZhao Chen * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the
7039c2956d2SZhao Chen * wqe only have one wqebb
70400e57a6dSAviad Krawczyk * @sq: send queue
70500e57a6dSAviad Krawczyk * @skb: return skb that was saved
7069c2956d2SZhao Chen * @wqe_size: the wqe size ptr
70700e57a6dSAviad Krawczyk * @cons_idx: consumer index of the wqe
70800e57a6dSAviad Krawczyk *
70900e57a6dSAviad Krawczyk * Return wqe in ci position
71000e57a6dSAviad Krawczyk **/
hinic_sq_read_wqebb(struct hinic_sq * sq,struct sk_buff ** skb,unsigned int * wqe_size,u16 * cons_idx)7119c2956d2SZhao Chen struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
71200e57a6dSAviad Krawczyk struct sk_buff **skb,
71300e57a6dSAviad Krawczyk unsigned int *wqe_size, u16 *cons_idx)
71400e57a6dSAviad Krawczyk {
71500e57a6dSAviad Krawczyk struct hinic_hw_wqe *hw_wqe;
71600e57a6dSAviad Krawczyk struct hinic_sq_wqe *sq_wqe;
71700e57a6dSAviad Krawczyk struct hinic_sq_ctrl *ctrl;
71800e57a6dSAviad Krawczyk unsigned int buf_sect_len;
71900e57a6dSAviad Krawczyk u32 ctrl_info;
72000e57a6dSAviad Krawczyk
72100e57a6dSAviad Krawczyk /* read the ctrl section for getting wqe size */
72200e57a6dSAviad Krawczyk hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
72300e57a6dSAviad Krawczyk if (IS_ERR(hw_wqe))
72400e57a6dSAviad Krawczyk return NULL;
72500e57a6dSAviad Krawczyk
7269c2956d2SZhao Chen *skb = sq->saved_skb[*cons_idx];
7279c2956d2SZhao Chen
72800e57a6dSAviad Krawczyk sq_wqe = &hw_wqe->sq_wqe;
72900e57a6dSAviad Krawczyk ctrl = &sq_wqe->ctrl;
73000e57a6dSAviad Krawczyk ctrl_info = be32_to_cpu(ctrl->ctrl_info);
73100e57a6dSAviad Krawczyk buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN);
73200e57a6dSAviad Krawczyk
73300e57a6dSAviad Krawczyk *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
73400e57a6dSAviad Krawczyk *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
7359c2956d2SZhao Chen *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
73600e57a6dSAviad Krawczyk
7379c2956d2SZhao Chen return &hw_wqe->sq_wqe;
7389c2956d2SZhao Chen }
7399c2956d2SZhao Chen
7409c2956d2SZhao Chen /**
7419c2956d2SZhao Chen * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
7429c2956d2SZhao Chen * @sq: send queue
7439c2956d2SZhao Chen * @skb: return skb that was saved
7449c2956d2SZhao Chen * @wqe_size: the size of the wqe
7459c2956d2SZhao Chen * @cons_idx: consumer index of the wqe
7469c2956d2SZhao Chen *
7479c2956d2SZhao Chen * Return wqe in ci position
7489c2956d2SZhao Chen **/
hinic_sq_read_wqe(struct hinic_sq * sq,struct sk_buff ** skb,unsigned int wqe_size,u16 * cons_idx)7499c2956d2SZhao Chen struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
7509c2956d2SZhao Chen struct sk_buff **skb,
7519c2956d2SZhao Chen unsigned int wqe_size, u16 *cons_idx)
7529c2956d2SZhao Chen {
7539c2956d2SZhao Chen struct hinic_hw_wqe *hw_wqe;
7549c2956d2SZhao Chen
7559c2956d2SZhao Chen hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
75600e57a6dSAviad Krawczyk *skb = sq->saved_skb[*cons_idx];
75700e57a6dSAviad Krawczyk
75800e57a6dSAviad Krawczyk return &hw_wqe->sq_wqe;
75900e57a6dSAviad Krawczyk }
76000e57a6dSAviad Krawczyk
76100e57a6dSAviad Krawczyk /**
76200e57a6dSAviad Krawczyk * hinic_sq_put_wqe - release the ci for new wqes
76300e57a6dSAviad Krawczyk * @sq: send queue
76400e57a6dSAviad Krawczyk * @wqe_size: the size of the wqe
76500e57a6dSAviad Krawczyk **/
hinic_sq_put_wqe(struct hinic_sq * sq,unsigned int wqe_size)76600e57a6dSAviad Krawczyk void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size)
76700e57a6dSAviad Krawczyk {
76800e57a6dSAviad Krawczyk hinic_put_wqe(sq->wq, wqe_size);
76900e57a6dSAviad Krawczyk }
77000e57a6dSAviad Krawczyk
77100e57a6dSAviad Krawczyk /**
77200e57a6dSAviad Krawczyk * hinic_sq_get_sges - get sges from the wqe
77300e57a6dSAviad Krawczyk * @sq_wqe: wqe to get the sges from its buffer addresses
77400e57a6dSAviad Krawczyk * @sges: returned sges
77500e57a6dSAviad Krawczyk * @nr_sges: number sges to return
77600e57a6dSAviad Krawczyk **/
hinic_sq_get_sges(struct hinic_sq_wqe * sq_wqe,struct hinic_sge * sges,int nr_sges)77700e57a6dSAviad Krawczyk void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
77800e57a6dSAviad Krawczyk int nr_sges)
77900e57a6dSAviad Krawczyk {
78000e57a6dSAviad Krawczyk int i;
78100e57a6dSAviad Krawczyk
78200e57a6dSAviad Krawczyk for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) {
78300e57a6dSAviad Krawczyk sges[i] = sq_wqe->buf_descs[i].sge;
78400e57a6dSAviad Krawczyk hinic_be32_to_cpu(&sges[i], sizeof(sges[i]));
78500e57a6dSAviad Krawczyk }
78600e57a6dSAviad Krawczyk }
78700e57a6dSAviad Krawczyk
788e2585ea7SAviad Krawczyk /**
789e2585ea7SAviad Krawczyk * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi
790e2585ea7SAviad Krawczyk * @rq: rq to get wqe from
791e2585ea7SAviad Krawczyk * @wqe_size: wqe size
792e2585ea7SAviad Krawczyk * @prod_idx: returned pi
793e2585ea7SAviad Krawczyk *
794e2585ea7SAviad Krawczyk * Return wqe pointer
795e2585ea7SAviad Krawczyk **/
hinic_rq_get_wqe(struct hinic_rq * rq,unsigned int wqe_size,u16 * prod_idx)796e2585ea7SAviad Krawczyk struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
797e2585ea7SAviad Krawczyk unsigned int wqe_size, u16 *prod_idx)
798e2585ea7SAviad Krawczyk {
799e2585ea7SAviad Krawczyk struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
800e2585ea7SAviad Krawczyk prod_idx);
801e2585ea7SAviad Krawczyk
802e2585ea7SAviad Krawczyk if (IS_ERR(hw_wqe))
803e2585ea7SAviad Krawczyk return NULL;
804e2585ea7SAviad Krawczyk
805e2585ea7SAviad Krawczyk return &hw_wqe->rq_wqe;
806e2585ea7SAviad Krawczyk }
807e2585ea7SAviad Krawczyk
808e2585ea7SAviad Krawczyk /**
809e2585ea7SAviad Krawczyk * hinic_rq_write_wqe - write the wqe to the rq
810e2585ea7SAviad Krawczyk * @rq: recv queue
811e2585ea7SAviad Krawczyk * @prod_idx: pi of the wqe
812e2585ea7SAviad Krawczyk * @rq_wqe: the wqe to write
813e2585ea7SAviad Krawczyk * @skb: skb to save
814e2585ea7SAviad Krawczyk **/
hinic_rq_write_wqe(struct hinic_rq * rq,u16 prod_idx,struct hinic_rq_wqe * rq_wqe,struct sk_buff * skb)815e2585ea7SAviad Krawczyk void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
816e2585ea7SAviad Krawczyk struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
817e2585ea7SAviad Krawczyk {
818e2585ea7SAviad Krawczyk struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe;
819e2585ea7SAviad Krawczyk
820e2585ea7SAviad Krawczyk rq->saved_skb[prod_idx] = skb;
821e2585ea7SAviad Krawczyk
822e2585ea7SAviad Krawczyk /* The data in the HW should be in Big Endian Format */
823e2585ea7SAviad Krawczyk hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe));
824e2585ea7SAviad Krawczyk
825e2585ea7SAviad Krawczyk hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
826e2585ea7SAviad Krawczyk }
827e2585ea7SAviad Krawczyk
828e2585ea7SAviad Krawczyk /**
829e2585ea7SAviad Krawczyk * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci
830e2585ea7SAviad Krawczyk * @rq: recv queue
831e2585ea7SAviad Krawczyk * @wqe_size: the size of the wqe
832e2585ea7SAviad Krawczyk * @skb: return saved skb
833e2585ea7SAviad Krawczyk * @cons_idx: consumer index of the wqe
834e2585ea7SAviad Krawczyk *
835e2585ea7SAviad Krawczyk * Return wqe in ci position
836e2585ea7SAviad Krawczyk **/
hinic_rq_read_wqe(struct hinic_rq * rq,unsigned int wqe_size,struct sk_buff ** skb,u16 * cons_idx)837e2585ea7SAviad Krawczyk struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
838e2585ea7SAviad Krawczyk unsigned int wqe_size,
839e2585ea7SAviad Krawczyk struct sk_buff **skb, u16 *cons_idx)
840e2585ea7SAviad Krawczyk {
841e2585ea7SAviad Krawczyk struct hinic_hw_wqe *hw_wqe;
842e2585ea7SAviad Krawczyk struct hinic_rq_cqe *cqe;
843e2585ea7SAviad Krawczyk int rx_done;
844e2585ea7SAviad Krawczyk u32 status;
845e2585ea7SAviad Krawczyk
846e2585ea7SAviad Krawczyk hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
847e2585ea7SAviad Krawczyk if (IS_ERR(hw_wqe))
848e2585ea7SAviad Krawczyk return NULL;
849e2585ea7SAviad Krawczyk
850e2585ea7SAviad Krawczyk cqe = rq->cqe[*cons_idx];
851e2585ea7SAviad Krawczyk
852e2585ea7SAviad Krawczyk status = be32_to_cpu(cqe->status);
853e2585ea7SAviad Krawczyk
854e2585ea7SAviad Krawczyk rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE);
855e2585ea7SAviad Krawczyk if (!rx_done)
856e2585ea7SAviad Krawczyk return NULL;
857e2585ea7SAviad Krawczyk
858e2585ea7SAviad Krawczyk *skb = rq->saved_skb[*cons_idx];
859e2585ea7SAviad Krawczyk
860e2585ea7SAviad Krawczyk return &hw_wqe->rq_wqe;
861e2585ea7SAviad Krawczyk }
862e2585ea7SAviad Krawczyk
863e2585ea7SAviad Krawczyk /**
864e2585ea7SAviad Krawczyk * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position
865e2585ea7SAviad Krawczyk * @rq: recv queue
866e2585ea7SAviad Krawczyk * @wqe_size: the size of the wqe
867e2585ea7SAviad Krawczyk * @skb: return saved skb
868e2585ea7SAviad Krawczyk * @cons_idx: consumer index in the wq
869e2585ea7SAviad Krawczyk *
870e2585ea7SAviad Krawczyk * Return wqe in incremented ci position
871e2585ea7SAviad Krawczyk **/
hinic_rq_read_next_wqe(struct hinic_rq * rq,unsigned int wqe_size,struct sk_buff ** skb,u16 * cons_idx)872e2585ea7SAviad Krawczyk struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
873e2585ea7SAviad Krawczyk unsigned int wqe_size,
874e2585ea7SAviad Krawczyk struct sk_buff **skb,
875e2585ea7SAviad Krawczyk u16 *cons_idx)
876e2585ea7SAviad Krawczyk {
877e2585ea7SAviad Krawczyk struct hinic_wq *wq = rq->wq;
878e2585ea7SAviad Krawczyk struct hinic_hw_wqe *hw_wqe;
879e2585ea7SAviad Krawczyk unsigned int num_wqebbs;
880e2585ea7SAviad Krawczyk
881e2585ea7SAviad Krawczyk wqe_size = ALIGN(wqe_size, wq->wqebb_size);
882e2585ea7SAviad Krawczyk num_wqebbs = wqe_size / wq->wqebb_size;
883e2585ea7SAviad Krawczyk
884e2585ea7SAviad Krawczyk *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
885e2585ea7SAviad Krawczyk
886e2585ea7SAviad Krawczyk *skb = rq->saved_skb[*cons_idx];
887e2585ea7SAviad Krawczyk
888e2585ea7SAviad Krawczyk hw_wqe = hinic_read_wqe_direct(wq, *cons_idx);
889e2585ea7SAviad Krawczyk
890e2585ea7SAviad Krawczyk return &hw_wqe->rq_wqe;
891e2585ea7SAviad Krawczyk }
892e2585ea7SAviad Krawczyk
/**
 * hinic_rq_put_wqe - release the ci for new wqes
 * @rq: recv queue
 * @cons_idx: consumer index of the wqe
 * @wqe_size: the size of the wqe
 **/
void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
		      unsigned int wqe_size)
{
	struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
	u32 status = be32_to_cpu(cqe->status);

	/* re-arm the cqe: clear RXDONE so this slot reads as "not yet
	 * received" the next time HW reuses it
	 */
	status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE);

	/* Rx WQE size is 1 WQEBB, no wq shadow*/
	cqe->status = cpu_to_be32(status);

	wmb(); /* clear done flag */

	hinic_put_wqe(rq->wq, wqe_size);
}
914e2585ea7SAviad Krawczyk
915e2585ea7SAviad Krawczyk /**
916e2585ea7SAviad Krawczyk * hinic_rq_get_sge - get sge from the wqe
917e2585ea7SAviad Krawczyk * @rq: recv queue
918e2585ea7SAviad Krawczyk * @rq_wqe: wqe to get the sge from its buf address
919e2585ea7SAviad Krawczyk * @cons_idx: consumer index
920e2585ea7SAviad Krawczyk * @sge: returned sge
921e2585ea7SAviad Krawczyk **/
hinic_rq_get_sge(struct hinic_rq * rq,struct hinic_rq_wqe * rq_wqe,u16 cons_idx,struct hinic_sge * sge)922e2585ea7SAviad Krawczyk void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
923e2585ea7SAviad Krawczyk u16 cons_idx, struct hinic_sge *sge)
924e2585ea7SAviad Krawczyk {
925e2585ea7SAviad Krawczyk struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
926e2585ea7SAviad Krawczyk u32 len = be32_to_cpu(cqe->len);
927e2585ea7SAviad Krawczyk
928e2585ea7SAviad Krawczyk sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr);
929e2585ea7SAviad Krawczyk sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr);
930e2585ea7SAviad Krawczyk sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN);
931e2585ea7SAviad Krawczyk }
932e2585ea7SAviad Krawczyk
933e2585ea7SAviad Krawczyk /**
934e2585ea7SAviad Krawczyk * hinic_rq_prepare_wqe - prepare wqe before insert to the queue
935e2585ea7SAviad Krawczyk * @rq: recv queue
936e2585ea7SAviad Krawczyk * @prod_idx: pi value
937e2585ea7SAviad Krawczyk * @rq_wqe: the wqe
938e2585ea7SAviad Krawczyk * @sge: sge for use by the wqe for recv buf address
939e2585ea7SAviad Krawczyk **/
hinic_rq_prepare_wqe(struct hinic_rq * rq,u16 prod_idx,struct hinic_rq_wqe * rq_wqe,struct hinic_sge * sge)940e2585ea7SAviad Krawczyk void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
941e2585ea7SAviad Krawczyk struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge)
942e2585ea7SAviad Krawczyk {
943e2585ea7SAviad Krawczyk struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
944e2585ea7SAviad Krawczyk struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
945e2585ea7SAviad Krawczyk struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
946e2585ea7SAviad Krawczyk struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
947e2585ea7SAviad Krawczyk dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
948e2585ea7SAviad Krawczyk
949e2585ea7SAviad Krawczyk ctrl->ctrl_info =
950e2585ea7SAviad Krawczyk HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
951e2585ea7SAviad Krawczyk HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)),
952e2585ea7SAviad Krawczyk COMPLETE_LEN) |
953e2585ea7SAviad Krawczyk HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)),
954e2585ea7SAviad Krawczyk BUFDESC_SECT_LEN) |
955e2585ea7SAviad Krawczyk HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
956e2585ea7SAviad Krawczyk
957e2585ea7SAviad Krawczyk hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe));
958e2585ea7SAviad Krawczyk
959e2585ea7SAviad Krawczyk buf_desc->hi_addr = sge->hi_addr;
960e2585ea7SAviad Krawczyk buf_desc->lo_addr = sge->lo_addr;
961e2585ea7SAviad Krawczyk }
962e2585ea7SAviad Krawczyk
963e2585ea7SAviad Krawczyk /**
964e2585ea7SAviad Krawczyk * hinic_rq_update - update pi of the rq
965e2585ea7SAviad Krawczyk * @rq: recv queue
966e2585ea7SAviad Krawczyk * @prod_idx: pi value
967e2585ea7SAviad Krawczyk **/
hinic_rq_update(struct hinic_rq * rq,u16 prod_idx)968e2585ea7SAviad Krawczyk void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
969e2585ea7SAviad Krawczyk {
970e2585ea7SAviad Krawczyk *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
971e2585ea7SAviad Krawczyk }
972