/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * - Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
#include "devlink.h"
#include "en/devlink.h"

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
	.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
};

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
				       u32 cqcc, void *data)
{
	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);

	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
					 struct mlx5_cqwq *wq,
					 u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	struct mlx5_cqe64 *title = &cqd->title;

	mlx5e_read_cqe_slot(wq, cqcc, title);
	cqd->left = be32_to_cpu(title->byte_cnt);
	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
	rq->stats->cqe_compress_blks++;
}
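/* Read the next block of compressed mini CQEs from the CQ ring into the
 * decompression session and restart the iterator at its first entry.
 */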
static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
					    struct mlx5e_cq_decomp *cqd,
					    u32 cqcc)
{
	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
	cqd->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
	u32 cqcc = wq->cc;
	u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
	u32 wq_sz = mlx5_cqwq_get_size(wq);
	u32 ci_top = min_t(u32, wq_sz, ci + n);

	for (; ci < ci_top; ci++, n--) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

		cqe->op_own = op_own;
	}

	if (unlikely(ci == wq_sz)) {
		op_own = !op_own;
		for (ci = 0; ci < n; ci++) {
			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

			cqe->op_own = op_own;
		}
	}
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
					struct mlx5_cqwq *wq,
					u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
	struct mlx5_cqe64 *title = &cqd->title;

	title->byte_cnt = mini_cqe->byte_cnt;
	title->check_sum = mini_cqe->checksum;
	title->op_own &= 0xf0;
	title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);

	/* state bit set implies linked-list striding RQ wq type and
	 * HW stride index capability supported
	 */
	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
		title->wqe_counter = mini_cqe->stridx;
		return;
	}

	/* HW stride index capability not supported */
	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
	else
		cqd->wqe_counter =
			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
						struct mlx5_cqwq *wq,
						u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;

	mlx5e_decompress_cqe(rq, wq, cqcc);
	cqd->title.rss_hash_type = 0;
	cqd->title.rss_hash_result = 0;
}

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
					     struct mlx5_cqwq *wq,
					     int update_owner_only,
					     int budget_rem)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	u32 cqcc = wq->cc + update_owner_only;
	u32 cqe_count;
	u32 i;

	cqe_count = min_t(u32, cqd->left, budget_rem);

	for (i = update_owner_only; i < cqe_count;
	     i++, cqd->mini_arr_idx++, cqcc++) {
		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);

		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
		INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, rq, &cqd->title);
	}
	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
	wq->cc = cqcc;
	cqd->left -= cqe_count;
	rq->stats->cqe_compress_pkts += cqe_count;

	return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
					      struct mlx5_cqwq *wq,
					      int budget_rem)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	u32 cc = wq->cc;

	mlx5e_read_title_slot(rq, wq, cc);
	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
	mlx5e_decompress_cqe(rq, wq, cc);
	INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
			mlx5e_handle_rx_cqe, rq, &cqd->title);
	cqd->mini_arr_idx++;

	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
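/* The page cache is a power-of-two ring indexed by head (consumer) and
 * tail (producer): a page is cached only while the slot after tail is
 * free and the page is still reusable on this node.
 */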
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
				      struct mlx5e_dma_info *dma_info)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;
	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
	struct mlx5e_rq_stats *stats = rq->stats;

	if (tail_next == cache->head) {
		stats->cache_full++;
		return false;
	}

	if (!dev_page_is_reusable(dma_info->page)) {
		stats->cache_waive++;
		return false;
	}

	cache->page_cache[cache->tail] = *dma_info;
	cache->tail = tail_next;
	return true;
}

static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
				      struct mlx5e_dma_info *dma_info)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;
	struct mlx5e_rq_stats *stats = rq->stats;

	if (unlikely(cache->head == cache->tail)) {
		stats->cache_empty++;
		return false;
	}

	if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
		stats->cache_busy++;
		return false;
	}

	*dma_info = cache->page_cache[cache->head];
	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
	stats->cache_reuse++;

	dma_sync_single_for_device(rq->pdev, dma_info->addr,
				   PAGE_SIZE,
				   DMA_FROM_DEVICE);
	return true;
}

static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
					struct mlx5e_dma_info *dma_info)
{
	if (mlx5e_rx_cache_get(rq, dma_info))
		return 0;

	dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
	if (unlikely(!dma_info->page))
		return -ENOMEM;

	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
				      PAGE_SIZE, rq->buff.map_dir);
	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
		page_pool_recycle_direct(rq->page_pool, dma_info->page);
		dma_info->page = NULL;
		return -ENOMEM;
	}

	return 0;
}

static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
				   struct mlx5e_dma_info *dma_info)
{
	if (rq->xsk_pool)
		return mlx5e_xsk_page_alloc_pool(rq, dma_info);
	else
		return mlx5e_page_alloc_pool(rq, dma_info);
}

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
	dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}

void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
				bool recycle)
{
	if (likely(recycle)) {
		if (mlx5e_rx_cache_put(rq, dma_info))
			return;

		mlx5e_page_dma_unmap(rq, dma_info);
		page_pool_recycle_direct(rq->page_pool, dma_info->page);
	} else {
		mlx5e_page_dma_unmap(rq, dma_info);
		page_pool_release_page(rq->page_pool, dma_info->page);
		put_page(dma_info->page);
	}
}

static inline void mlx5e_page_release(struct mlx5e_rq *rq,
				      struct mlx5e_dma_info *dma_info,
				      bool recycle)
{
	if (rq->xsk_pool)
		/* The `recycle` parameter is ignored, and the page is always
		 * put into the Reuse Ring, because there is no way to return
		 * the page to the userspace when the interface goes down.
		 */
		xsk_buff_free(dma_info->xsk);
	else
		mlx5e_page_release_dynamic(rq, dma_info, recycle);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
				    struct mlx5e_wqe_frag_info *frag)
{
	int err = 0;

	if (!frag->offset)
		/* On first frag (offset == 0), replenish page (dma_info actually).
		 * Other frags that point to the same dma_info (with a different
		 * offset) should just use the new one without replenishing again
		 * by themselves.
		 */
		err = mlx5e_page_alloc(rq, frag->di);

	return err;
}

static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
				     struct mlx5e_wqe_frag_info *frag,
				     bool recycle)
{
	if (frag->last_in_page)
		mlx5e_page_release(rq, frag->di, recycle);
}

static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
{
	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
}

static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
			      u16 ix)
{
	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
	int err;
	int i;

	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
		err = mlx5e_get_rx_frag(rq, frag);
		if (unlikely(err))
			goto free_frags;

		wqe->data[i].addr = cpu_to_be64(frag->di->addr +
						frag->offset + rq->buff.headroom);
	}

	return 0;

free_frags:
	while (--i >= 0)
		mlx5e_put_rx_frag(rq, --frag, true);

	return err;
}

static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
				     struct mlx5e_wqe_frag_info *wi,
				     bool recycle)
{
	int i;

	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
		mlx5e_put_rx_frag(rq, wi, recycle);
}

static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

	mlx5e_free_rx_wqe(rq, wi, false);
}

static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	int err;
	int i;

	if (rq->xsk_pool) {
		int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;

		/* Check in advance that we have enough frames, instead of
		 * allocating one-by-one, failing and moving frames to the
		 * Reuse Ring.
		 */
		if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
			return -ENOMEM;
	}

	for (i = 0; i < wqe_bulk; i++) {
		struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);

		err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
		if (unlikely(err))
			goto free_wqes;
	}

	return 0;

free_wqes:
	while (--i >= 0)
		mlx5e_dealloc_rx_wqe(rq, ix + i);

	return err;
}

static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
		   struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
		   unsigned int truesize)
{
	dma_sync_single_for_cpu(rq->pdev,
				di->addr + frag_offset,
				len, DMA_FROM_DEVICE);
	page_ref_inc(di->page);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			di->page, frag_offset, len, truesize);
}

static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
		      struct mlx5e_dma_info *dma_info,
		      int offset_from, u32 headlen)
{
	const void *from = page_address(dma_info->page) + offset_from;
	/* Aligning len to sizeof(long) optimizes memcpy performance */
	unsigned int len = ALIGN(headlen, sizeof(long));

	dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
				DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, from, len);
}
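/* Release the pages of a whole MPWQE, skipping any page whose bit is set
 * in xdp_xmit_bitmap: those are in flight on an XDP transmit path and are
 * returned on its completion instead.
 */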
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
	bool no_xdp_xmit;
	struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
	int i;

	/* A common case for AF_XDP. */
	if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
		return;

	no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
				   MLX5_MPWRQ_PAGES_PER_WQE);

	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
		if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
			mlx5e_page_release(rq, &dma_info[i], recycle);
}

static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
{
	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

	do {
		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);

		mlx5_wq_ll_push(wq, next_wqe_index);
	} while (--n);

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);
}

static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
	struct mlx5e_icosq *sq = rq->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_umr_wqe *umr_wqe;
	u16 pi;
	int err;
	int i;

	/* Check in advance that we have enough frames, instead of allocating
	 * one-by-one, failing and moving frames to the Reuse Ring.
	 */
	if (rq->xsk_pool &&
	    unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
		err = -ENOMEM;
		goto err;
	}

	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));

	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
		err = mlx5e_page_alloc(rq, dma_info);
		if (unlikely(err))
			goto err_unmap;
		umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
	}

	bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
	wi->consumed_strides = 0;

	umr_wqe->ctrl.opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
			    MLX5_OPCODE_UMR);
	umr_wqe->uctrl.xlt_offset =
		cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));

	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
		.num_wqebbs = MLX5E_UMR_WQEBBS,
		.umr.rq     = rq,
	};

	sq->pc += MLX5E_UMR_WQEBBS;

	sq->doorbell_cseg = &umr_wqe->ctrl;

	return 0;

err_unmap:
	while (--i >= 0) {
		dma_info--;
		mlx5e_page_release(rq, dma_info, true);
	}

err:
	rq->stats->buff_alloc_err++;

	return err;
}

static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	/* Don't recycle, this function is called on rq/netdev close */
	mlx5e_free_rx_mpwqe(rq, wi, false);
}
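/* Refill the legacy (cyclic) RQ in bulks of wqe_bulk WQEs. Returns true
 * when an allocation error left the ring short, so the caller knows to
 * retry on the next NAPI cycle.
 */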
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	u8 wqe_bulk;
	int err;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return false;

	wqe_bulk = rq->wqe.info.wqe_bulk;

	if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
		return false;

	do {
		u16 head = mlx5_wq_cyc_get_head(wq);

		err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
		if (unlikely(err)) {
			rq->stats->buff_alloc_err++;
			break;
		}

		mlx5_wq_cyc_push_n(wq, wqe_bulk);
	} while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_cyc_update_db_record(wq);

	return !!err;
}

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
{
	u16 sqcc;

	sqcc = sq->cc;

	while (sqcc != sq->pc) {
		struct mlx5e_icosq_wqe_info *wi;
		u16 ci;

		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];
		sqcc += wi->num_wqebbs;
#ifdef CONFIG_MLX5_EN_TLS
		switch (wi->wqe_type) {
		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
			mlx5e_ktls_handle_ctx_completion(wi);
			break;
		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
			mlx5e_ktls_handle_get_psv_completion(wi, sq);
			break;
		}
#endif
	}
	sq->cc = sqcc;
}

int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
	struct mlx5_cqe64 *cqe;
	u16 sqcc;
	int i;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return 0;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (likely(!cqe))
		return 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_icosq_wqe_info *wi;
			u16 ci;

			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];
			sqcc += wi->num_wqebbs;

			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
				netdev_WARN_ONCE(cq->netdev,
						 "Bad OP in ICOSQ CQE: 0x%x\n",
						 get_cqe_opcode(cqe));
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
					queue_work(cq->priv->wq, &sq->recover_work);
				break;
			}

			switch (wi->wqe_type) {
			case MLX5E_ICOSQ_WQE_UMR_RX:
				wi->umr.rq->mpwqe.umr_completed++;
				break;
			case MLX5E_ICOSQ_WQE_NOP:
				break;
#ifdef CONFIG_MLX5_EN_TLS
			case MLX5E_ICOSQ_WQE_UMR_TLS:
				break;
			case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
				mlx5e_ktls_handle_ctx_completion(wi);
				break;
			case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
				mlx5e_ktls_handle_get_psv_completion(wi, sq);
				break;
#endif
			default:
				netdev_WARN_ONCE(cq->netdev,
						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
						 wi->wqe_type);
			}
		} while (!last_wqe);
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	sq->cc = sqcc;

	mlx5_cqwq_update_db_record(&cq->wq);

	return i;
}
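/* Replenish the striding RQ: push WQEs whose UMR mappings completed, then
 * issue new UMR WQEs for the missing entries (at least UMR_WQE_BULK at a
 * time) and ring the ICOSQ doorbell once for the whole batch.
 */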
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
	u8 umr_completed = rq->mpwqe.umr_completed;
	struct mlx5e_icosq *sq = rq->icosq;
	int alloc_err = 0;
	u8 missing, i;
	u16 head;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return false;

	if (umr_completed) {
		mlx5e_post_rx_mpwqe(rq, umr_completed);
		rq->mpwqe.umr_in_progress -= umr_completed;
		rq->mpwqe.umr_completed = 0;
	}

	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;

	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
		rq->stats->congst_umr++;

#define UMR_WQE_BULK (2)
	if (likely(missing < UMR_WQE_BULK))
		return false;

	head = rq->mpwqe.actual_wq_head;
	i = missing;
	do {
		alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);

		if (unlikely(alloc_err))
			break;
		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
	} while (--i);

	rq->mpwqe.umr_last_bulk = missing - i;
	if (sq->doorbell_cseg) {
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
		sq->doorbell_cseg = NULL;
	}

	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
	rq->mpwqe.actual_wq_head = head;

	/* If XSK Fill Ring doesn't have enough frames, report the error, so
	 * that one of the actions can be performed:
	 * 1. If need_wakeup is used, signal that the application has to kick
	 * the driver when it refills the Fill Ring.
	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
	 */
	if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
		return true;

	return false;
}

static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
{
	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
		     (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);

	tcp->check = 0;
	tcp->psh = get_cqe_lro_tcppsh(cqe);

	if (tcp_ack) {
		tcp->ack = 1;
		tcp->ack_seq = cqe->lro_ack_seq_num;
		tcp->window = cqe->lro_tcp_win;
	}
}

static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
				 u32 cqe_bcnt)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct tcphdr *tcp;
	int network_depth = 0;
	__wsum check;
	__be16 proto;
	u16 tot_len;
	void *ip_p;

	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

	tot_len = cqe_bcnt - network_depth;
	ip_p = skb->data + network_depth;

	if (proto == htons(ETH_P_IP)) {
		struct iphdr *ipv4 = ip_p;

		tcp = ip_p + sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

		ipv4->ttl = cqe->lro_min_ttl;
		ipv4->tot_len = cpu_to_be16(tot_len);
		ipv4->check = 0;
		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
					   ipv4->ihl);

		mlx5e_lro_update_tcp_hdr(cqe, tcp);
		check = csum_partial(tcp, tcp->doff * 4,
				     csum_unfold((__force __sum16)cqe->check_sum));
		/* Almost done, don't forget the pseudo header */
		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
					       tot_len - sizeof(struct iphdr),
					       IPPROTO_TCP, check);
	} else {
		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
		struct ipv6hdr *ipv6 = ip_p;

		tcp = ip_p + sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

		ipv6->hop_limit = cqe->lro_min_ttl;
		ipv6->payload_len = cpu_to_be16(payload_len);

		mlx5e_lro_update_tcp_hdr(cqe, tcp);
		check = csum_partial(tcp, tcp->doff * 4,
				     csum_unfold((__force __sum16)cqe->check_sum));
		/* Almost done, don't forget the pseudo header */
		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
					     IPPROTO_TCP, check);
	}
}
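/* Map the CQE RSS hash type onto the kernel's PKT_HASH_TYPE_* levels:
 * L4 if the hash covers ports, L3 if it covers addresses only.
 */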
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
				      struct sk_buff *skb)
{
	u8 cht = cqe->rss_hash_type;
	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
					    PKT_HASH_TYPE_NONE;
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
					__be16 *proto)
{
	*proto = ((struct ethhdr *)skb->data)->h_proto;
	*proto = __vlan_get_protocol(skb, *proto, network_depth);

	if (*proto == htons(ETH_P_IP))
		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));

	if (*proto == htons(ETH_P_IPV6))
		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));

	return false;
}

static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	int network_depth = 0;
	__be16 proto;
	void *ip;
	int rc;

	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
		return;

	ip = skb->data + network_depth;
	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
					   IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));

	rq->stats->ecn_mark += !!rc;
}

static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
	void *ip_p = skb->data + network_depth;

	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
					    ((struct ipv6hdr *)ip_p)->nexthdr;
}

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

#define MAX_PADDING 8

static void
tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
		       struct mlx5e_rq_stats *stats)
{
	stats->csum_complete_tail_slow++;
	skb->csum = csum_block_add(skb->csum,
				   skb_checksum(skb, offset, len, 0),
				   offset);
}

static void
tail_padding_csum(struct sk_buff *skb, int offset,
		  struct mlx5e_rq_stats *stats)
{
	u8 tail_padding[MAX_PADDING];
	int len = skb->len - offset;
	void *tail;

	if (unlikely(len > MAX_PADDING)) {
		tail_padding_csum_slow(skb, offset, len, stats);
		return;
	}

	tail = skb_header_pointer(skb, offset, len, tail_padding);
	if (unlikely(!tail)) {
		tail_padding_csum_slow(skb, offset, len, stats);
		return;
	}

	stats->csum_complete_tail++;
	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
}
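/* Adjust a CHECKSUM_COMPLETE value for the bytes the CQE checksum does
 * not cover: VLAN headers preceding the IP header, and any padding octets
 * appended after the IP payload.
 */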
static void
mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
		     struct mlx5e_rq_stats *stats)
{
	struct ipv6hdr *ip6;
	struct iphdr *ip4;
	int pkt_len;

	/* Fixup vlan headers, if any */
	if (network_depth > ETH_HLEN)
		/* CQE csum is calculated from the IP header and does
		 * not cover VLAN headers (if present). This will add
		 * the checksum manually.
		 */
		skb->csum = csum_partial(skb->data + ETH_HLEN,
					 network_depth - ETH_HLEN,
					 skb->csum);

	/* Fixup tail padding, if any */
	switch (proto) {
	case htons(ETH_P_IP):
		ip4 = (struct iphdr *)(skb->data + network_depth);
		pkt_len = network_depth + ntohs(ip4->tot_len);
		break;
	case htons(ETH_P_IPV6):
		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
		break;
	default:
		return;
	}

	if (likely(pkt_len >= skb->len))
		return;

	tail_padding_csum(skb, pkt_len, stats);
}

static inline void mlx5e_handle_csum(struct net_device *netdev,
				     struct mlx5_cqe64 *cqe,
				     struct mlx5e_rq *rq,
				     struct sk_buff *skb,
				     bool lro)
{
	struct mlx5e_rq_stats *stats = rq->stats;
	int network_depth = 0;
	__be16 proto;

	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		goto csum_none;

	if (lro) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		stats->csum_unnecessary++;
		return;
	}

	/* True when explicitly set via priv flag, or XDP prog is loaded */
	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
		goto csum_unnecessary;

	/* CQE csum doesn't cover padding octets in short ethernet
	 * frames. And the pad field is appended prior to calculating
	 * and appending the FCS field.
	 *
	 * Detecting these padded frames requires to verify and parse
	 * IP headers, so we simply force all those small frames to be
	 * CHECKSUM_UNNECESSARY even if they are not padded.
	 */
	if (short_frame(skb->len))
		goto csum_unnecessary;

	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
		u8 ipproto = get_ip_proto(skb, network_depth, proto);

		if (unlikely(ipproto == IPPROTO_SCTP))
			goto csum_unnecessary;

		if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
			goto csum_none;

		stats->csum_complete++;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);

		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
			return; /* CQE csum covers all received bytes */

		/* csum might need some fixups ...*/
		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
		return;
	}

csum_unnecessary:
	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
		   (cqe->hds_ip_ext & CQE_L4_OK))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (cqe_is_tunneled(cqe)) {
			skb->csum_level = 1;
			skb->encapsulation = 1;
			stats->csum_unnecessary_inner++;
			return;
		}
		stats->csum_unnecessary++;
		return;
	}
csum_none:
	skb->ip_summed = CHECKSUM_NONE;
	stats->csum_none++;
}

#define MLX5E_CE_BIT_MASK 0x80
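/* Populate SKB metadata from the CQE: LRO and timestamp handling, RX
 * queue and hash, VLAN stripping, checksum status and ECN CE marking.
 */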
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
				      u32 cqe_bcnt,
				      struct mlx5e_rq *rq,
				      struct sk_buff *skb)
{
	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	struct mlx5e_rq_stats *stats = rq->stats;
	struct net_device *netdev = rq->netdev;

	skb->mac_len = ETH_HLEN;

	mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);

	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);

	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
		/* Subtract one since we already counted this as one
		 * "regular" packet in mlx5e_complete_rx_cqe()
		 */
		stats->packets += lro_num_seg - 1;
		stats->lro_packets++;
		stats->lro_bytes += cqe_bcnt;
	}

	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
								  rq->clock, get_cqe_ts(cqe));
	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	if (cqe_has_vlan(cqe)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));
		stats->removed_vlan_packets++;
	}

	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;

	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
	/* checking CE bit in cqe - MSB in ml_path field */
	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
		mlx5e_enable_ecn(rq, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (unlikely(mlx5e_skb_is_multicast(skb)))
		stats->mcast_packets++;
}

static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	stats->packets++;
	stats->bytes += cqe_bcnt;
	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}

static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
				       u32 frag_size, u16 headroom,
				       u32 cqe_bcnt)
{
	struct sk_buff *skb = build_skb(va, frag_size);

	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	skb_reserve(skb, headroom);
	skb_put(skb, cqe_bcnt);

	return skb;
}

static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
				u32 len, struct xdp_buff *xdp)
{
	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, va, headroom, len, false);
}

static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			  struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
	struct mlx5e_dma_info *di = wi->di;
	u16 rx_headroom = rq->buff.headroom;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	void *va, *data;
	u32 frag_size;

	va = page_address(di->page) + wi->offset;
	data = va + rx_headroom;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);

	dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
				      frag_size, DMA_FROM_DEVICE);
	net_prefetchw(va); /* xdp_frame data area */
	net_prefetch(data);

	mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
	if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
		return NULL; /* page/packet was consumed by XDP */

	rx_headroom = xdp.data - xdp.data_hard_start;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
	if (unlikely(!skb))
		return NULL;

	/* queue up for recycling/reuse */
	page_ref_inc(di->page);

	return skb;
}
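/* Build an SKB for a multi-fragment WQE: allocate a linear part for the
 * packet headers and attach the remaining bytes as page fragments.
 */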
static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
	struct mlx5e_wqe_frag_info *head_wi = wi;
	u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
	u16 frag_headlen = headlen;
	u16 byte_cnt = cqe_bcnt - headlen;
	struct sk_buff *skb;

	/* XDP is not supported in this configuration, as incoming packets
	 * might spread among multiple pages.
	 */
	skb = napi_alloc_skb(rq->cq.napi,
			     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	net_prefetchw(skb->data);

	while (byte_cnt) {
		u16 frag_consumed_bytes =
			min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);

		mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
				   frag_consumed_bytes, frag_info->frag_stride);
		byte_cnt -= frag_consumed_bytes;
		frag_headlen = 0;
		frag_info++;
		wi++;
	}

	/* copy header */
	mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
	/* skb linear part was allocated with headlen and aligned to long */
	skb->tail += headlen;
	skb->len += headlen;

	return skb;
}

static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
	struct mlx5e_priv *priv = rq->priv;

	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
		mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
		queue_work(priv->wq, &rq->recover_work);
	}
}
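/* Per-CQE handler for the legacy RQ: look up the WQE fragments, build an
 * SKB (unless XDP consumed the packet), complete and GRO-receive it, then
 * release the WQE back to the ring.
 */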
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		trigger_report(rq, cqe);
		rq->stats->wqe_err++;
		goto free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, cqe, wi, cqe_bcnt);
	if (!skb) {
		/* probably for XDP */
		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
			/* do not return page to cache,
			 * it will be returned on XDP_TX completion.
			 */
			goto wq_cyc_pop;
		}
		goto free_wqe;
	}

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (mlx5e_cqe_regb_chain(cqe))
		if (!mlx5e_tc_update_skb(cqe, skb)) {
			dev_kfree_skb_any(skb);
			goto free_wqe;
		}

	napi_gro_receive(rq->cq.napi, skb);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
	mlx5_wq_cyc_pop(wq);
}

#ifdef CONFIG_MLX5_ESWITCH
static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_tc_update_priv tc_priv = {};
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, cqe, wi, cqe_bcnt);
	if (!skb) {
		/* probably for XDP */
		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
			/* do not return page to cache,
			 * it will be returned on XDP_TX completion.
			 */
			goto wq_cyc_pop;
		}
		goto free_wqe;
	}

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (rep->vlan && skb_vlan_tag_present(skb))
		skb_vlan_pop(skb);

	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
		dev_kfree_skb_any(skb);
		goto free_wqe;
	}

	napi_gro_receive(rq->cq.napi, skb);

	mlx5_rep_tc_post_napi_receive(&tc_priv);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
	mlx5_wq_cyc_pop(wq);
}
static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	struct mlx5e_tc_update_priv tc_priv = {};
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		trigger_report(rq, cqe);
		rq->stats->wqe_err++;
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      rq, wi, cqe_bcnt, head_offset, page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
		dev_kfree_skb_any(skb);
		goto mpwrq_cqe_out;
	}

	napi_gro_receive(rq->cq.napi, skb);

	mlx5_rep_tc_post_napi_receive(&tc_priv);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
	.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
};
#endif

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
	struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
	u32 frag_offset = head_offset + headlen;
	u32 byte_cnt = cqe_bcnt - headlen;
	struct mlx5e_dma_info *head_di = di;
	struct sk_buff *skb;

	skb = napi_alloc_skb(rq->cq.napi,
			     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	net_prefetchw(skb->data);

	if (unlikely(frag_offset >= PAGE_SIZE)) {
		di++;
		frag_offset -= PAGE_SIZE;
	}

	while (byte_cnt) {
		u32 pg_consumed_bytes =
			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
		unsigned int truesize =
			ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));

		mlx5e_add_skb_frag(rq, skb, di, frag_offset,
				   pg_consumed_bytes, truesize);
		byte_cnt -= pg_consumed_bytes;
		frag_offset = 0;
		di++;
	}
	/* copy header */
	mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
	/* skb linear part was allocated with headlen and aligned to long */
	skb->tail += headlen;
	skb->len += headlen;

	return skb;
}
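/* Build an SKB around a single stride of an MPWQE page, letting XDP run
 * on the buffer first; the page refcount is bumped so the page can be
 * recycled once the SKB is consumed.
 */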
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
	struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
	u16 rx_headroom = rq->buff.headroom;
	u32 cqe_bcnt32 = cqe_bcnt;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	void *va, *data;
	u32 frag_size;

	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
		rq->stats->oversize_pkts_sw_drop++;
		return NULL;
	}

	va = page_address(di->page) + head_offset;
	data = va + rx_headroom;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);

	dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
				      frag_size, DMA_FROM_DEVICE);
	net_prefetchw(va); /* xdp_frame data area */
	net_prefetch(data);

	mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
	if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
		return NULL; /* page/packet was consumed by XDP */
	}

	rx_headroom = xdp.data - xdp.data_hard_start;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
	if (unlikely(!skb))
		return NULL;

	/* queue up for recycling/reuse */
	page_ref_inc(di->page);

	return skb;
}

static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		trigger_report(rq, cqe);
		rq->stats->wqe_err++;
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      rq, wi, cqe_bcnt, head_offset, page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (mlx5e_cqe_regb_chain(cqe))
		if (!mlx5e_tc_update_skb(cqe, skb)) {
			dev_kfree_skb_any(skb);
			goto mpwrq_cqe_out;
		}

	napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
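/* NAPI RX poll: drain any in-progress compressed CQE session first, then
 * pop regular CQEs up to the budget, starting a new decompression session
 * whenever a compressed CQE is met.
 */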
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return 0;

	if (rq->page_pool)
		page_pool_nid_changed(rq->page_pool, numa_mem_id());

	if (rq->cqd.left) {
		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
		if (rq->cqd.left || work_done >= budget)
			goto out;
	}

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe) {
		if (unlikely(work_done))
			goto out;
		return 0;
	}

	do {
		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			work_done +=
				mlx5e_decompress_cqes_start(rq, cqwq,
							    budget - work_done);
			continue;
		}

		mlx5_cqwq_pop(cqwq);

		INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, rq, cqe);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

out:
	if (rcu_access_pointer(rq->xdp_prog))
		mlx5e_xdp_rx_poll_complete(rq);

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}

#ifdef CONFIG_MLX5_CORE_IPOIB

#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE           16
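/* IPoIB completion: classify the packet by its GRH DGID, drop our own
 * HCA-replicated multicast, strip the GRH and synthesize the pseudo
 * header the IPoIB netdev expects.
 */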
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct hwtstamp_config *tstamp;
	struct mlx5e_rq_stats *stats;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	char *pseudo_header;
	u32 flags_rqpn;
	u32 qpn;
	u8 *dgid;
	u8 g;

	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);

	/* No mapping present, cannot process SKB. This might happen if a child
	 * interface is going down while having unprocessed CQEs on parent RQ
	 */
	if (unlikely(!netdev)) {
		/* TODO: add drop counters support */
		skb->dev = NULL;
		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
		return;
	}

	priv = mlx5i_epriv(netdev);
	tstamp = &priv->tstamp;
	stats = &priv->channel_stats[rq->ix].rq;

	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
	g = (flags_rqpn >> 28) & 3;
	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
	if ((!g) || dgid[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	/* Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
		    MLX5_GID_SIZE) == 0)) {
		skb->dev = NULL;
		return;
	}

	skb_pull(skb, MLX5_IB_GRH_BYTES);

	skb->protocol = *((__be16 *)(skb->data));

	if (netdev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		stats->csum_complete++;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		stats->csum_none++;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
								  rq->clock, get_cqe_ts(cqe));
	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	/* 20 bytes of ipoib header and 4 for encap existing */
	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
	skb_reset_mac_header(skb);
	skb_pull(skb, MLX5_IPOIB_HARD_LEN);

	skb->dev = netdev;

	stats->packets++;
	stats->bytes += cqe_bcnt;
}

static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, cqe, wi, cqe_bcnt);
	if (!skb)
		goto wq_free_wqe;

	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	if (unlikely(!skb->dev)) {
		dev_kfree_skb_any(skb);
		goto wq_free_wqe;
	}
	napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
	mlx5_wq_cyc_pop(wq);
}

const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
};
#endif /* CONFIG_MLX5_CORE_IPOIB */

#ifdef CONFIG_MLX5_EN_IPSEC

static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, cqe, wi, cqe_bcnt);
	if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
		goto wq_free_wqe;

	skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
	if (unlikely(!skb))
		goto wq_free_wqe;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
	mlx5_wq_cyc_pop(wq);
}

#endif /* CONFIG_MLX5_EN_IPSEC */
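/* Bind the RQ callbacks (skb builder, refill and per-CQE handler) to the
 * WQ type and the XSK/IPsec configuration; fails if the profile left the
 * required handler unset or the combination is unsupported.
 */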
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5_core_dev *mdev = rq->mdev;
	struct mlx5e_priv *priv = rq->priv;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
		if (mlx5_fpga_is_ipsec_device(mdev)) {
			netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
			return -EINVAL;
		}
		if (!rq->handle_rx_cqe) {
			netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
			return -EINVAL;
		}
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
		    priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			netdev_err(netdev, "RX handler of RQ is not set\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5e_priv *priv = netdev_priv(rq->netdev);
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct devlink_port *dl_port;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 trap_id;
	u16 ci;

	trap_id = get_cqe_flow_tag(cqe);
	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto free_wqe;
	}

	skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
	if (!skb)
		goto free_wqe;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	skb_push(skb, ETH_HLEN);

	dl_port = mlx5e_devlink_get_dl_port(priv);
	mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
	dev_kfree_skb_any(skb);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, false);
	mlx5_wq_cyc_pop(wq);
}

void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
			       mlx5e_skb_from_cqe_linear :
			       mlx5e_skb_from_cqe_nonlinear;
	rq->post_wqes = mlx5e_post_rx_wqes;
	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
}