1542578c6STariq Toukan /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2542578c6STariq Toukan /* Copyright (c) 2019 Mellanox Technologies. */ 3542578c6STariq Toukan 4542578c6STariq Toukan #ifndef __MLX5_EN_TXRX_H___ 5542578c6STariq Toukan #define __MLX5_EN_TXRX_H___ 6542578c6STariq Toukan 7542578c6STariq Toukan #include "en.h" 85d0b8476STariq Toukan #include <linux/indirect_call_wrapper.h> 9542578c6STariq Toukan 1097e3afd6SMaxim Mikityanskiy #define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) 1197e3afd6SMaxim Mikityanskiy 12542578c6STariq Toukan #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) 13542578c6STariq Toukan 1428bff095STariq Toukan enum mlx5e_icosq_wqe_type { 1528bff095STariq Toukan MLX5E_ICOSQ_WQE_NOP, 1628bff095STariq Toukan MLX5E_ICOSQ_WQE_UMR_RX, 171182f365STariq Toukan #ifdef CONFIG_MLX5_EN_TLS 181182f365STariq Toukan MLX5E_ICOSQ_WQE_UMR_TLS, 191182f365STariq Toukan MLX5E_ICOSQ_WQE_SET_PSV_TLS, 200419d8c9STariq Toukan MLX5E_ICOSQ_WQE_GET_PSV_TLS, 211182f365STariq Toukan #endif 2228bff095STariq Toukan }; 2328bff095STariq Toukan 24b307f7f1STariq Toukan /* General */ 25b307f7f1STariq Toukan void mlx5e_trigger_irq(struct mlx5e_icosq *sq); 26b307f7f1STariq Toukan void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe); 27b307f7f1STariq Toukan void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); 28b307f7f1STariq Toukan int mlx5e_napi_poll(struct napi_struct *napi, int budget); 29b307f7f1STariq Toukan int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); 30b307f7f1STariq Toukan 31b307f7f1STariq Toukan /* RX */ 32b307f7f1STariq Toukan void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); 33b307f7f1STariq Toukan void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, 34b307f7f1STariq Toukan struct mlx5e_dma_info *dma_info, 35b307f7f1STariq Toukan bool recycle); 365d0b8476STariq Toukan INDIRECT_CALLABLE_DECLARE(bool 
mlx5e_post_rx_wqes(struct mlx5e_rq *rq)); 375d0b8476STariq Toukan INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)); 38b307f7f1STariq Toukan int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 39b307f7f1STariq Toukan void mlx5e_free_rx_descs(struct mlx5e_rq *rq); 40b307f7f1STariq Toukan void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); 41b307f7f1STariq Toukan 42b307f7f1STariq Toukan /* TX */ 43b307f7f1STariq Toukan u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, 44b307f7f1STariq Toukan struct net_device *sb_dev); 45b307f7f1STariq Toukan netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); 46b307f7f1STariq Toukan bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); 47b307f7f1STariq Toukan void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); 48b307f7f1STariq Toukan 49542578c6STariq Toukan static inline bool 50542578c6STariq Toukan mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) 51542578c6STariq Toukan { 52542578c6STariq Toukan return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); 53542578c6STariq Toukan } 54542578c6STariq Toukan 55fed0c6cfSMaxim Mikityanskiy static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size) 56542578c6STariq Toukan { 57fd1b2259STariq Toukan void *wqe; 58542578c6STariq Toukan 59fed0c6cfSMaxim Mikityanskiy wqe = mlx5_wq_cyc_get_wqe(wq, pi); 60fed0c6cfSMaxim Mikityanskiy memset(wqe, 0, wqe_size); 61fd1b2259STariq Toukan 62fd1b2259STariq Toukan return wqe; 63542578c6STariq Toukan } 64542578c6STariq Toukan 65fed0c6cfSMaxim Mikityanskiy #define MLX5E_TX_FETCH_WQE(sq, pi) \ 66fed0c6cfSMaxim Mikityanskiy ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe))) 67fed0c6cfSMaxim Mikityanskiy 68542578c6STariq Toukan static inline struct mlx5e_tx_wqe * 69542578c6STariq Toukan mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) 70542578c6STariq Toukan { 71542578c6STariq Toukan u16 pi = 
mlx5_wq_cyc_ctr2ix(wq, *pc); 72542578c6STariq Toukan struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); 73542578c6STariq Toukan struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; 74542578c6STariq Toukan 75542578c6STariq Toukan memset(cseg, 0, sizeof(*cseg)); 76542578c6STariq Toukan 77542578c6STariq Toukan cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP); 78542578c6STariq Toukan cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01); 79542578c6STariq Toukan 80542578c6STariq Toukan (*pc)++; 81542578c6STariq Toukan 82542578c6STariq Toukan return wqe; 83542578c6STariq Toukan } 84542578c6STariq Toukan 8537badd15STariq Toukan static inline struct mlx5e_tx_wqe * 8637badd15STariq Toukan mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) 8737badd15STariq Toukan { 8837badd15STariq Toukan u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); 8937badd15STariq Toukan struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); 9037badd15STariq Toukan struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; 9137badd15STariq Toukan 9237badd15STariq Toukan memset(cseg, 0, sizeof(*cseg)); 9337badd15STariq Toukan 9437badd15STariq Toukan cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP); 9537badd15STariq Toukan cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01); 9637badd15STariq Toukan cseg->fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL; 9737badd15STariq Toukan 9837badd15STariq Toukan (*pc)++; 9937badd15STariq Toukan 10037badd15STariq Toukan return wqe; 10137badd15STariq Toukan } 10237badd15STariq Toukan 10305dfd570STariq Toukan struct mlx5e_tx_wqe_info { 10405dfd570STariq Toukan struct sk_buff *skb; 10505dfd570STariq Toukan u32 num_bytes; 10605dfd570STariq Toukan u8 num_wqebbs; 10705dfd570STariq Toukan u8 num_dma; 108338c46c6SMaxim Mikityanskiy u8 num_fifo_pkts; 10905dfd570STariq Toukan #ifdef CONFIG_MLX5_EN_TLS 11005dfd570STariq Toukan struct page *resync_dump_frag_page; 11105dfd570STariq Toukan #endif 11205dfd570STariq Toukan }; 11305dfd570STariq Toukan 114ec9cdca0SMaxim Mikityanskiy 
/* Return a producer index at which a WQE of @size WQEBBs fits contiguously
 * in the TXQ SQ. If fewer than @size WQEBBs remain before the WQ fragment
 * edge, that remainder is filled with NOP WQEs (each recorded in wqe_info
 * and counted in the nop statistic) and the index of the next fragment
 * start is returned.
 */
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

/* Per-WQE metadata kept by the ICOSQ for completion processing. */
struct mlx5e_icosq_wqe_info {
	u8 wqe_type; /* one of enum mlx5e_icosq_wqe_type */
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

/* ICOSQ counterpart of mlx5e_txqsq_get_next_pi(): return a producer index
 * where @size WQEBBs fit contiguously, padding the fragment edge with NOP
 * WQEs (tagged MLX5E_ICOSQ_WQE_NOP) when needed.
 */
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

/* Post the doorbell for the WQE whose ctrl segment is @ctrl: request a CQE
 * for it, make the WQE visible to the device, update the doorbell record to
 * @pc, then ring the doorbell by writing the ctrl segment to the UAR.
 */
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

/* Arm the CQ: request a notification from the device on the next completion. */
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

/* DMA fifo accessor; the fifo is a power-of-two ring indexed by counter & mask. */
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

/* Record a DMA mapping in the SQ fifo so it can be unmapped on completion
 * (see mlx5e_tx_dma_unmap()).
 */
static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

/* skb fifo accessor; power-of-two ring indexed by counter & mask. */
static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i)
{
	return &sq->db.skb_fifo[i & sq->skb_fifo_mask];
}

/* Push @skb at the fifo's producer position and advance the producer counter. */
static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++);

	*skb_item = skb;
}

/* Pop the skb at the fifo's consumer position and advance the consumer counter. */
static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq)
{
	return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++);
}

/* Undo a DMA mapping previously recorded with mlx5e_dma_push(). */
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);

/* Reset the RQ work queue to its initial state; for the striding
 * (multi-packet WQE) RQ, the in-progress head tracker is reset as well.
 */
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

/* Log an error CQE (opcode and syndromes) for queue @qn, then dump the raw
 * CQE contents via mlx5_dump_err_cqe().
 */
static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	/* cc was already advanced past this CQE, hence cc - 1 */
	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

/* Size of the RQ work queue, dispatching on the RQ type. */
static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

/* Current cur_sz of the underlying RQ work queue. */
static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

/* Head index of the RQ work queue. */
static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

/* WQE counter of the RQ work queue. */
static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

/* Protocol layout used to program the eth segment's software parser (SWP)
 * fields; consumed by mlx5e_set_eseg_swp().
 */
struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};
/* Program the eth segment's software parser (SWP) header offsets and flags
 * from @skb, according to the protocol layout described by @swp_spec.
 * Outer offsets always come from the skb's (outer) network/transport
 * headers; inner offsets come from the inner headers for tunnels, or are
 * duplicated from the outer headers otherwise.
 */
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	/* UDP sets the inner-L4-UDP flag, then falls through to share the
	 * inner L4 offset assignment with TCP.
	 */
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

/* Stop room (in WQEBBs) that must be kept free in an SQ to safely post one
 * WQE of @wqe_size WQEBBs: the WQE itself plus worst-case NOP padding
 * (wqe_size - 1), i.e. 2 * wqe_size - 1. The size limit is enforced at
 * compile time when @wqe_size is a constant, at runtime otherwise.
 */
static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
{
	BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */

	if (__builtin_constant_p(wqe_size))
		BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
	else
		WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);

	return wqe_size * 2 - 1;
}

#endif /* __MLX5_EN_TXRX_H___ */