/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

/* IPSEC inline data includes:
 * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
 *    next header.
 * 2. ESP authentication data: 16 bytes for ICV.
 */
#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)

/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
 * encapsulations.
 */
#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
					    MLX5_SEND_WQE_DS)

/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
					 MLX5E_MAX_TX_INLINE_DS + \
					 MLX5E_MAX_TX_IPSEC_DS + \
					 MAX_SKB_FRAGS + 1, \
					 MLX5_SEND_WQEBB_NUM_DS)

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
	MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

/* TX */
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{
	return (u16)(*fifo->pc - *fifo->cc) < fifo->mask;
}

static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
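/* Worked example for mlx5e_wqc_has_room_for above (illustrative only):
 * the counters are free-running u16 values, so the check relies on
 * unsigned wraparound. Assuming a WQ of 256 WQEBBs, with cc = 0x0100
 * and pc = 0x0105, cc - pc wraps to 0xfffb, which mlx5_wq_cyc_ctr2ix
 * masks to 0xfb = 251: 5 WQEBBs are in flight and 251 are free, so any
 * n <= 251 fits. The explicit cc == pc case covers the empty ring,
 * where the masked difference would otherwise read as 0 free slots.
 */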
static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}

struct mlx5e_shampo_umr {
	u16 len;
};

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
		struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);
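/* Worked example for the frag-edge NOP fill (illustrative only):
 * assume a WQ fragment of 8 WQEBBs and pi = 6, so only 2 contiguous
 * WQEBBs remain before the fragment edge. A WQE of size = 3 would
 * wrap across the edge, which the hardware forbids, so 2 one-WQEBB
 * NOPs are posted to burn the tail of the fragment; pc advances past
 * them and the returned pi points at the start of the next fragment,
 * where the 3-WQEBB WQE fits contiguously. mlx5e_icosq_get_next_pi
 * below applies the same pattern to the internal control SQ.
 */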
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}
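/* A minimal posting sketch (illustrative; real senders also compute
 * WQE attributes and handle stop room and completions):
 *
 *	u16 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 *	struct mlx5e_tx_wqe *wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *
 *	... fill wqe->ctrl and wqe->eth, advance sq->pc ...
 *
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 *
 * Here num_wqebbs stands for the caller-computed WQE size in WQEBBs.
 * The dma_wmb() in mlx5e_notify_hw() orders the WQE stores before the
 * doorbell-record update, and the wmb() orders that update before the
 * MMIO doorbell write, so the device never fetches a stale WQE.
 */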
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
	return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}
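/* Worked example for the stop-room bound below (illustrative only):
 * for a WQE of wqe_size = 4 WQEBBs, the worst case leaves 3 contiguous
 * WQEBBs before a page boundary. All 3 are burned as NOPs and the 4
 * real WQEBBs are written at the start of the next page, so the SQ
 * must reserve 3 + 4 = 2 * 4 - 1 = 7 WQEBBs of headroom, exactly
 * MLX5E_STOP_ROOM(4).
 */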
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X - 1 WQEBBs of padding, bringing the
	 *    stop room to X - 1 + X.
	 * WQE size is also limited by the hardware limit.
	 */
	WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
		  "wqe_size %u is greater than max SQ WQEBBs %u",
		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));

	return MLX5E_STOP_ROOM(wqe_size);
}

static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
{
	return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}

static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
	u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);

	return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
}

static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
	u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);

	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}

static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
{
	size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe);

	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
#endif /* __MLX5_EN_TXRX_H___ */