/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
 * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
 * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
 * full-session WQE be cache-aligned.
 */
#if L1_CACHE_BYTES < 128
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#else
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
#endif

#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
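
/* Illustrative sizing for the bound above (assuming MLX5_SEND_WQE_BB is
 * 64 bytes and MLX5_SEND_WQEBB_NUM_DS is 4): with 64-byte cachelines the
 * session is capped at 15 WQEBBs, i.e. 60 DS entries (fits the 6-bit DS
 * field) and 15 * 64 == 960 bytes, a whole number of cachelines; with
 * 128-byte cachelines the cap of 14 WQEBBs gives 56 DS entries and
 * 14 * 64 == 896 == 7 * 128 bytes, again a whole number of cachelines.
 */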

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
				bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);

/* TX */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
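
/* A worked example of the free-room check above (illustrative only,
 * assuming mlx5_wq_cyc_ctr2ix() masks the counter with a power-of-two
 * ring size): with a ring of 8 WQEBBs, pc == 10 and cc == 6, there are
 * 4 WQEBBs in flight, so (cc - pc) & 7 == 4 free WQEBBs and the check
 * passes for n <= 4. When cc == pc the masked difference is 0 even
 * though the whole ring is free, hence the explicit (cc == pc) case.
 */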

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}
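
/* A minimal sketch of how these helpers are typically combined when
 * posting a WQE (illustrative only; sq->pc and sq->uar_map are assumed
 * to be the SQ producer counter and doorbell UAR mapping):
 *
 *	u16 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 *	struct mlx5e_tx_wqe *wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *
 *	... fill ctrl/eth/data segments and sq->db.wqe_info[pi] ...
 *
 *	sq->pc += num_wqebbs;
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 *
 * mlx5e_txqsq_get_next_pi() guarantees the WQE does not wrap past the
 * ring fragment edge, padding with NOPs if needed, and
 * MLX5E_TX_FETCH_WQE() returns a zeroed WQE at that index.
 */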

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
	return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
}

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
{
	BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */

	if (__builtin_constant_p(wqe_size))
		BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
	else
		WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);

	return wqe_size * 2 - 1;
}

#endif /* __MLX5_EN_TXRX_H___ */