1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/ip.h> 34 #include <linux/ipv6.h> 35 #include <linux/tcp.h> 36 #include <linux/bitmap.h> 37 #include <net/ip6_checksum.h> 38 #include <net/page_pool.h> 39 #include <net/inet_ecn.h> 40 #include <net/gro.h> 41 #include <net/udp.h> 42 #include <net/tcp.h> 43 #include "en.h" 44 #include "en/txrx.h" 45 #include "en_tc.h" 46 #include "eswitch.h" 47 #include "en_rep.h" 48 #include "en/rep/tc.h" 49 #include "ipoib/ipoib.h" 50 #include "accel/ipsec.h" 51 #include "fpga/ipsec.h" 52 #include "en_accel/ipsec_rxtx.h" 53 #include "en_accel/tls_rxtx.h" 54 #include "en/xdp.h" 55 #include "en/xsk/rx.h" 56 #include "en/health.h" 57 #include "en/params.h" 58 #include "devlink.h" 59 #include "en/devlink.h" 60 61 static struct sk_buff * 62 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 63 u16 cqe_bcnt, u32 head_offset, u32 page_idx); 64 static struct sk_buff * 65 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 66 u16 cqe_bcnt, u32 head_offset, u32 page_idx); 67 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 68 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 69 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 70 71 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = { 72 .handle_rx_cqe = mlx5e_handle_rx_cqe, 73 .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 74 .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo, 75 }; 76 77 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) 78 { 79 return config->rx_filter == HWTSTAMP_FILTER_ALL; 80 } 81 82 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq, 83 u32 cqcc, void *data) 84 { 85 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); 86 87 memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64)); 88 } 89 90 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, 91 struct mlx5_cqwq *wq, 92 u32 cqcc) 93 { 94 struct mlx5e_cq_decomp *cqd = &rq->cqd; 95 struct mlx5_cqe64 *title = &cqd->title; 96 97 mlx5e_read_cqe_slot(wq, cqcc, 
title); 98 cqd->left = be32_to_cpu(title->byte_cnt); 99 cqd->wqe_counter = be16_to_cpu(title->wqe_counter); 100 rq->stats->cqe_compress_blks++; 101 } 102 103 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq, 104 struct mlx5e_cq_decomp *cqd, 105 u32 cqcc) 106 { 107 mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr); 108 cqd->mini_arr_idx = 0; 109 } 110 111 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n) 112 { 113 u32 cqcc = wq->cc; 114 u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1; 115 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); 116 u32 wq_sz = mlx5_cqwq_get_size(wq); 117 u32 ci_top = min_t(u32, wq_sz, ci + n); 118 119 for (; ci < ci_top; ci++, n--) { 120 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); 121 122 cqe->op_own = op_own; 123 } 124 125 if (unlikely(ci == wq_sz)) { 126 op_own = !op_own; 127 for (ci = 0; ci < n; ci++) { 128 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); 129 130 cqe->op_own = op_own; 131 } 132 } 133 } 134 135 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, 136 struct mlx5_cqwq *wq, 137 u32 cqcc) 138 { 139 struct mlx5e_cq_decomp *cqd = &rq->cqd; 140 struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx]; 141 struct mlx5_cqe64 *title = &cqd->title; 142 143 title->byte_cnt = mini_cqe->byte_cnt; 144 title->check_sum = mini_cqe->checksum; 145 title->op_own &= 0xf0; 146 title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz); 147 148 /* state bit set implies linked-list striding RQ wq type and 149 * HW stride index capability supported 150 */ 151 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) { 152 title->wqe_counter = mini_cqe->stridx; 153 return; 154 } 155 156 /* HW stride index capability not supported */ 157 title->wqe_counter = cpu_to_be16(cqd->wqe_counter); 158 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) 159 cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title); 160 else 161 cqd->wqe_counter = 162 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1); 163 } 164 165 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, 166 struct mlx5_cqwq *wq, 167 u32 cqcc) 168 { 169 struct mlx5e_cq_decomp *cqd = &rq->cqd; 170 171 mlx5e_decompress_cqe(rq, wq, cqcc); 172 cqd->title.rss_hash_type = 0; 173 cqd->title.rss_hash_result = 0; 174 } 175 176 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, 177 struct mlx5_cqwq *wq, 178 int update_owner_only, 179 int budget_rem) 180 { 181 struct mlx5e_cq_decomp *cqd = &rq->cqd; 182 u32 cqcc = wq->cc + update_owner_only; 183 u32 cqe_count; 184 u32 i; 185 186 cqe_count = min_t(u32, cqd->left, budget_rem); 187 188 for (i = update_owner_only; i < cqe_count; 189 i++, cqd->mini_arr_idx++, cqcc++) { 190 if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE) 191 mlx5e_read_mini_arr_slot(wq, cqd, cqcc); 192 193 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); 194 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, 195 mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe, 196 rq, &cqd->title); 197 } 198 mlx5e_cqes_update_owner(wq, cqcc - wq->cc); 199 wq->cc = cqcc; 200 cqd->left -= cqe_count; 201 rq->stats->cqe_compress_pkts += cqe_count; 202 203 return cqe_count; 204 } 205 206 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, 207 struct mlx5_cqwq *wq, 208 int budget_rem) 209 { 210 struct mlx5e_cq_decomp *cqd = &rq->cqd; 211 u32 cc = wq->cc; 212 213 mlx5e_read_title_slot(rq, wq, cc); 214 mlx5e_read_mini_arr_slot(wq, cqd, cc + 1); 215 mlx5e_decompress_cqe(rq, wq, cc); 216 INDIRECT_CALL_3(rq->handle_rx_cqe, 
mlx5e_handle_rx_cqe_mpwrq, 217 mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe, 218 rq, &cqd->title); 219 cqd->mini_arr_idx++; 220 221 return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; 222 } 223 224 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, 225 struct mlx5e_dma_info *dma_info) 226 { 227 struct mlx5e_page_cache *cache = &rq->page_cache; 228 u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1); 229 struct mlx5e_rq_stats *stats = rq->stats; 230 231 if (tail_next == cache->head) { 232 stats->cache_full++; 233 return false; 234 } 235 236 if (!dev_page_is_reusable(dma_info->page)) { 237 stats->cache_waive++; 238 return false; 239 } 240 241 cache->page_cache[cache->tail] = *dma_info; 242 cache->tail = tail_next; 243 return true; 244 } 245 246 static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, 247 struct mlx5e_dma_info *dma_info) 248 { 249 struct mlx5e_page_cache *cache = &rq->page_cache; 250 struct mlx5e_rq_stats *stats = rq->stats; 251 252 if (unlikely(cache->head == cache->tail)) { 253 stats->cache_empty++; 254 return false; 255 } 256 257 if (page_ref_count(cache->page_cache[cache->head].page) != 1) { 258 stats->cache_busy++; 259 return false; 260 } 261 262 *dma_info = cache->page_cache[cache->head]; 263 cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1); 264 stats->cache_reuse++; 265 266 dma_sync_single_for_device(rq->pdev, dma_info->addr, 267 PAGE_SIZE, 268 DMA_FROM_DEVICE); 269 return true; 270 } 271 272 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, 273 struct mlx5e_dma_info *dma_info) 274 { 275 if (mlx5e_rx_cache_get(rq, dma_info)) 276 return 0; 277 278 dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); 279 if (unlikely(!dma_info->page)) 280 return -ENOMEM; 281 282 dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE, 283 rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC); 284 if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { 285 page_pool_recycle_direct(rq->page_pool, dma_info->page); 286 dma_info->page = NULL; 287 return -ENOMEM; 288 } 289 290 return 0; 291 } 292 293 static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, 294 struct mlx5e_dma_info *dma_info) 295 { 296 if (rq->xsk_pool) 297 return mlx5e_xsk_page_alloc_pool(rq, dma_info); 298 else 299 return mlx5e_page_alloc_pool(rq, dma_info); 300 } 301 302 void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) 303 { 304 dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir, 305 DMA_ATTR_SKIP_CPU_SYNC); 306 } 307 308 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, 309 struct mlx5e_dma_info *dma_info, 310 bool recycle) 311 { 312 if (likely(recycle)) { 313 if (mlx5e_rx_cache_put(rq, dma_info)) 314 return; 315 316 mlx5e_page_dma_unmap(rq, dma_info); 317 page_pool_recycle_direct(rq->page_pool, dma_info->page); 318 } else { 319 mlx5e_page_dma_unmap(rq, dma_info); 320 page_pool_release_page(rq->page_pool, dma_info->page); 321 put_page(dma_info->page); 322 } 323 } 324 325 static inline void mlx5e_page_release(struct mlx5e_rq *rq, 326 struct mlx5e_dma_info *dma_info, 327 bool recycle) 328 { 329 if (rq->xsk_pool) 330 /* The `recycle` parameter is ignored, and the page is always 331 * put into the Reuse Ring, because there is no way to return 332 * the page to the userspace when the interface goes down. 
333 */ 334 xsk_buff_free(dma_info->xsk); 335 else 336 mlx5e_page_release_dynamic(rq, dma_info, recycle); 337 } 338 339 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, 340 struct mlx5e_wqe_frag_info *frag) 341 { 342 int err = 0; 343 344 if (!frag->offset) 345 /* On first frag (offset == 0), replenish page (dma_info actually). 346 * Other frags that point to the same dma_info (with a different 347 * offset) should just use the new one without replenishing again 348 * by themselves. 349 */ 350 err = mlx5e_page_alloc(rq, frag->di); 351 352 return err; 353 } 354 355 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, 356 struct mlx5e_wqe_frag_info *frag, 357 bool recycle) 358 { 359 if (frag->last_in_page) 360 mlx5e_page_release(rq, frag->di, recycle); 361 } 362 363 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) 364 { 365 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; 366 } 367 368 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, 369 u16 ix) 370 { 371 struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix); 372 int err; 373 int i; 374 375 for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { 376 err = mlx5e_get_rx_frag(rq, frag); 377 if (unlikely(err)) 378 goto free_frags; 379 380 wqe->data[i].addr = cpu_to_be64(frag->di->addr + 381 frag->offset + rq->buff.headroom); 382 } 383 384 return 0; 385 386 free_frags: 387 while (--i >= 0) 388 mlx5e_put_rx_frag(rq, --frag, true); 389 390 return err; 391 } 392 393 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, 394 struct mlx5e_wqe_frag_info *wi, 395 bool recycle) 396 { 397 int i; 398 399 for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) 400 mlx5e_put_rx_frag(rq, wi, recycle); 401 } 402 403 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) 404 { 405 struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); 406 407 mlx5e_free_rx_wqe(rq, wi, false); 408 } 409 410 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) 411 { 412 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 413 int err; 414 int i; 415 416 if (rq->xsk_pool) { 417 int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags; 418 419 /* Check in advance that we have enough frames, instead of 420 * allocating one-by-one, failing and moving frames to the 421 * Reuse Ring. 
422 */ 423 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired))) 424 return -ENOMEM; 425 } 426 427 for (i = 0; i < wqe_bulk; i++) { 428 struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i); 429 430 err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i); 431 if (unlikely(err)) 432 goto free_wqes; 433 } 434 435 return 0; 436 437 free_wqes: 438 while (--i >= 0) 439 mlx5e_dealloc_rx_wqe(rq, ix + i); 440 441 return err; 442 } 443 444 static inline void 445 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, 446 struct mlx5e_dma_info *di, u32 frag_offset, u32 len, 447 unsigned int truesize) 448 { 449 dma_sync_single_for_cpu(rq->pdev, 450 di->addr + frag_offset, 451 len, DMA_FROM_DEVICE); 452 page_ref_inc(di->page); 453 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 454 di->page, frag_offset, len, truesize); 455 } 456 457 static inline void 458 mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, 459 struct mlx5e_dma_info *dma_info, 460 int offset_from, int dma_offset, u32 headlen) 461 { 462 const void *from = page_address(dma_info->page) + offset_from; 463 /* Aligning len to sizeof(long) optimizes memcpy performance */ 464 unsigned int len = ALIGN(headlen, sizeof(long)); 465 466 dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len, 467 DMA_FROM_DEVICE); 468 skb_copy_to_linear_data(skb, from, len); 469 } 470 471 static void 472 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle) 473 { 474 bool no_xdp_xmit; 475 struct mlx5e_dma_info *dma_info = wi->umr.dma_info; 476 int i; 477 478 /* A common case for AF_XDP. */ 479 if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE)) 480 return; 481 482 no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, 483 MLX5_MPWRQ_PAGES_PER_WQE); 484 485 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) 486 if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) 487 mlx5e_page_release(rq, &dma_info[i], recycle); 488 } 489 490 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n) 491 { 492 struct mlx5_wq_ll *wq = &rq->mpwqe.wq; 493 494 do { 495 u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head); 496 497 mlx5_wq_ll_push(wq, next_wqe_index); 498 } while (--n); 499 500 /* ensure wqes are visible to device before updating doorbell record */ 501 dma_wmb(); 502 503 mlx5_wq_ll_update_db_record(wq); 504 } 505 506 /* This function returns the size of the continuous free space inside a bitmap 507 * that starts from first and no longer than len including circular ones. 
508 */ 509 static int bitmap_find_window(unsigned long *bitmap, int len, 510 int bitmap_size, int first) 511 { 512 int next_one, count; 513 514 next_one = find_next_bit(bitmap, bitmap_size, first); 515 if (next_one == bitmap_size) { 516 if (bitmap_size - first >= len) 517 return len; 518 next_one = find_next_bit(bitmap, bitmap_size, 0); 519 count = next_one + bitmap_size - first; 520 } else { 521 count = next_one - first; 522 } 523 524 return min(len, count); 525 } 526 527 static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, 528 __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs) 529 { 530 memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms)); 531 umr_wqe->ctrl.opmod_idx_opcode = 532 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | 533 MLX5_OPCODE_UMR); 534 umr_wqe->ctrl.umr_mkey = key; 535 umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) 536 | MLX5E_KLM_UMR_DS_CNT(klm_len)); 537 umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; 538 umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); 539 umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len); 540 umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); 541 } 542 543 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, 544 struct mlx5e_icosq *sq, 545 u16 klm_entries, u16 index) 546 { 547 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 548 u16 entries, pi, header_offset, err, wqe_bbs, new_entries; 549 u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; 550 struct page *page = shampo->last_page; 551 u64 addr = shampo->last_addr; 552 struct mlx5e_dma_info *dma_info; 553 struct mlx5e_umr_wqe *umr_wqe; 554 int headroom, i; 555 556 headroom = rq->buff.headroom; 557 new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); 558 entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT); 559 wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries); 560 pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); 561 umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); 562 build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs); 563 564 for (i = 0; i < entries; i++, index++) { 565 dma_info = &shampo->info[index]; 566 if (i >= klm_entries || (index < shampo->pi && shampo->pi - index < 567 MLX5_UMR_KLM_ALIGNMENT)) 568 goto update_klm; 569 header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << 570 MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; 571 if (!(header_offset & (PAGE_SIZE - 1))) { 572 err = mlx5e_page_alloc(rq, dma_info); 573 if (unlikely(err)) 574 goto err_unmap; 575 addr = dma_info->addr; 576 page = dma_info->page; 577 } else { 578 dma_info->addr = addr + header_offset; 579 dma_info->page = page; 580 } 581 582 update_klm: 583 umr_wqe->inline_klms[i].bcount = 584 cpu_to_be32(MLX5E_RX_MAX_HEAD); 585 umr_wqe->inline_klms[i].key = cpu_to_be32(lkey); 586 umr_wqe->inline_klms[i].va = 587 cpu_to_be64(dma_info->addr + headroom); 588 } 589 590 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { 591 .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, 592 .num_wqebbs = wqe_bbs, 593 .shampo.len = new_entries, 594 }; 595 596 shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); 597 shampo->last_page = page; 598 shampo->last_addr = addr; 599 sq->pc += wqe_bbs; 600 sq->doorbell_cseg = &umr_wqe->ctrl; 601 602 return 0; 603 604 err_unmap: 605 while (--i >= 0) { 606 dma_info = &shampo->info[--index]; 607 if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { 608 dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); 609 mlx5e_page_release(rq, dma_info, true); 610 } 611 } 612 
rq->stats->buff_alloc_err++; 613 return err; 614 } 615 616 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) 617 { 618 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 619 u16 klm_entries, num_wqe, index, entries_before; 620 struct mlx5e_icosq *sq = rq->icosq; 621 int i, err, max_klm_entries, len; 622 623 max_klm_entries = MLX5E_MAX_KLM_PER_WQE; 624 klm_entries = bitmap_find_window(shampo->bitmap, 625 shampo->hd_per_wqe, 626 shampo->hd_per_wq, shampo->pi); 627 if (!klm_entries) 628 return 0; 629 630 klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); 631 index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT); 632 entries_before = shampo->hd_per_wq - index; 633 634 if (unlikely(entries_before < klm_entries)) 635 num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) + 636 DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries); 637 else 638 num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries); 639 640 for (i = 0; i < num_wqe; i++) { 641 len = (klm_entries > max_klm_entries) ? max_klm_entries : 642 klm_entries; 643 if (unlikely(index + len > shampo->hd_per_wq)) 644 len = shampo->hd_per_wq - index; 645 err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); 646 if (unlikely(err)) 647 return err; 648 index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); 649 klm_entries -= len; 650 } 651 652 return 0; 653 } 654 655 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) 656 { 657 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; 658 struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; 659 struct mlx5e_icosq *sq = rq->icosq; 660 struct mlx5_wq_cyc *wq = &sq->wq; 661 struct mlx5e_umr_wqe *umr_wqe; 662 u16 pi; 663 int err; 664 int i; 665 666 /* Check in advance that we have enough frames, instead of allocating 667 * one-by-one, failing and moving frames to the Reuse Ring. 668 */ 669 if (rq->xsk_pool && 670 unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) { 671 err = -ENOMEM; 672 goto err; 673 } 674 675 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { 676 err = mlx5e_alloc_rx_hd_mpwqe(rq); 677 if (unlikely(err)) 678 goto err; 679 } 680 681 pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS); 682 umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); 683 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts)); 684 685 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { 686 err = mlx5e_page_alloc(rq, dma_info); 687 if (unlikely(err)) 688 goto err_unmap; 689 umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR); 690 } 691 692 bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); 693 wi->consumed_strides = 0; 694 695 umr_wqe->ctrl.opmod_idx_opcode = 696 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | 697 MLX5_OPCODE_UMR); 698 umr_wqe->uctrl.xlt_offset = 699 cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix))); 700 701 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { 702 .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, 703 .num_wqebbs = MLX5E_UMR_WQEBBS, 704 .umr.rq = rq, 705 }; 706 707 sq->pc += MLX5E_UMR_WQEBBS; 708 709 sq->doorbell_cseg = &umr_wqe->ctrl; 710 711 return 0; 712 713 err_unmap: 714 while (--i >= 0) { 715 dma_info--; 716 mlx5e_page_release(rq, dma_info, true); 717 } 718 719 err: 720 rq->stats->buff_alloc_err++; 721 722 return err; 723 } 724 725 /* This function is responsible to dealloc SHAMPO header buffer. 
726 * close == true specifies that we are in the middle of closing RQ operation so 727 * we go over all the entries and if they are not in use we free them, 728 * otherwise we only go over a specific range inside the header buffer that are 729 * not in use. 730 */ 731 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close) 732 { 733 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 734 int hd_per_wq = shampo->hd_per_wq; 735 struct page *deleted_page = NULL; 736 struct mlx5e_dma_info *hd_info; 737 int i, index = start; 738 739 for (i = 0; i < len; i++, index++) { 740 if (index == hd_per_wq) 741 index = 0; 742 743 if (close && !test_bit(index, shampo->bitmap)) 744 continue; 745 746 hd_info = &shampo->info[index]; 747 hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE); 748 if (hd_info->page != deleted_page) { 749 deleted_page = hd_info->page; 750 mlx5e_page_release(rq, hd_info, false); 751 } 752 } 753 754 if (start + len > hd_per_wq) { 755 len -= hd_per_wq - start; 756 bitmap_clear(shampo->bitmap, start, hd_per_wq - start); 757 start = 0; 758 } 759 760 bitmap_clear(shampo->bitmap, start, len); 761 } 762 763 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) 764 { 765 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; 766 /* Don't recycle, this function is called on rq/netdev close */ 767 mlx5e_free_rx_mpwqe(rq, wi, false); 768 } 769 770 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) 771 { 772 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 773 u8 wqe_bulk; 774 int err; 775 776 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) 777 return false; 778 779 wqe_bulk = rq->wqe.info.wqe_bulk; 780 781 if (mlx5_wq_cyc_missing(wq) < wqe_bulk) 782 return false; 783 784 if (rq->page_pool) 785 page_pool_nid_changed(rq->page_pool, numa_mem_id()); 786 787 do { 788 u16 head = mlx5_wq_cyc_get_head(wq); 789 790 err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); 791 if (unlikely(err)) { 792 rq->stats->buff_alloc_err++; 793 break; 794 } 795 796 mlx5_wq_cyc_push_n(wq, wqe_bulk); 797 } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk); 798 799 /* ensure wqes are visible to device before updating doorbell record */ 800 dma_wmb(); 801 802 mlx5_wq_cyc_update_db_record(wq); 803 804 return !!err; 805 } 806 807 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq) 808 { 809 u16 sqcc; 810 811 sqcc = sq->cc; 812 813 while (sqcc != sq->pc) { 814 struct mlx5e_icosq_wqe_info *wi; 815 u16 ci; 816 817 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 818 wi = &sq->db.wqe_info[ci]; 819 sqcc += wi->num_wqebbs; 820 #ifdef CONFIG_MLX5_EN_TLS 821 switch (wi->wqe_type) { 822 case MLX5E_ICOSQ_WQE_SET_PSV_TLS: 823 mlx5e_ktls_handle_ctx_completion(wi); 824 break; 825 case MLX5E_ICOSQ_WQE_GET_PSV_TLS: 826 mlx5e_ktls_handle_get_psv_completion(wi, sq); 827 break; 828 } 829 #endif 830 } 831 sq->cc = sqcc; 832 } 833 834 static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr, 835 struct mlx5e_icosq *sq) 836 { 837 struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq); 838 struct mlx5e_shampo_hd *shampo; 839 /* assume 1:1 relationship between RQ and icosq */ 840 struct mlx5e_rq *rq = &c->rq; 841 int end, from, len = umr.len; 842 843 shampo = rq->mpwqe.shampo; 844 end = shampo->hd_per_wq; 845 from = shampo->ci; 846 if (from + len > shampo->hd_per_wq) { 847 len -= end - from; 848 bitmap_set(shampo->bitmap, from, end - from); 849 from = 0; 850 } 851 852 bitmap_set(shampo->bitmap, from, len); 853 shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1); 854 } 855 856 int 
mlx5e_poll_ico_cq(struct mlx5e_cq *cq) 857 { 858 struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); 859 struct mlx5_cqe64 *cqe; 860 u16 sqcc; 861 int i; 862 863 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) 864 return 0; 865 866 cqe = mlx5_cqwq_get_cqe(&cq->wq); 867 if (likely(!cqe)) 868 return 0; 869 870 /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), 871 * otherwise a cq overrun may occur 872 */ 873 sqcc = sq->cc; 874 875 i = 0; 876 do { 877 u16 wqe_counter; 878 bool last_wqe; 879 880 mlx5_cqwq_pop(&cq->wq); 881 882 wqe_counter = be16_to_cpu(cqe->wqe_counter); 883 884 do { 885 struct mlx5e_icosq_wqe_info *wi; 886 u16 ci; 887 888 last_wqe = (sqcc == wqe_counter); 889 890 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 891 wi = &sq->db.wqe_info[ci]; 892 sqcc += wi->num_wqebbs; 893 894 if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { 895 netdev_WARN_ONCE(cq->netdev, 896 "Bad OP in ICOSQ CQE: 0x%x\n", 897 get_cqe_opcode(cqe)); 898 mlx5e_dump_error_cqe(&sq->cq, sq->sqn, 899 (struct mlx5_err_cqe *)cqe); 900 mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); 901 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 902 queue_work(cq->priv->wq, &sq->recover_work); 903 break; 904 } 905 906 switch (wi->wqe_type) { 907 case MLX5E_ICOSQ_WQE_UMR_RX: 908 wi->umr.rq->mpwqe.umr_completed++; 909 break; 910 case MLX5E_ICOSQ_WQE_NOP: 911 break; 912 case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR: 913 mlx5e_handle_shampo_hd_umr(wi->shampo, sq); 914 break; 915 #ifdef CONFIG_MLX5_EN_TLS 916 case MLX5E_ICOSQ_WQE_UMR_TLS: 917 break; 918 case MLX5E_ICOSQ_WQE_SET_PSV_TLS: 919 mlx5e_ktls_handle_ctx_completion(wi); 920 break; 921 case MLX5E_ICOSQ_WQE_GET_PSV_TLS: 922 mlx5e_ktls_handle_get_psv_completion(wi, sq); 923 break; 924 #endif 925 default: 926 netdev_WARN_ONCE(cq->netdev, 927 "Bad WQE type in ICOSQ WQE info: 0x%x\n", 928 wi->wqe_type); 929 } 930 } while (!last_wqe); 931 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 932 933 sq->cc = sqcc; 934 935 mlx5_cqwq_update_db_record(&cq->wq); 936 937 return i; 938 } 939 940 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) 941 { 942 struct mlx5_wq_ll *wq = &rq->mpwqe.wq; 943 u8 umr_completed = rq->mpwqe.umr_completed; 944 struct mlx5e_icosq *sq = rq->icosq; 945 int alloc_err = 0; 946 u8 missing, i; 947 u16 head; 948 949 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) 950 return false; 951 952 if (umr_completed) { 953 mlx5e_post_rx_mpwqe(rq, umr_completed); 954 rq->mpwqe.umr_in_progress -= umr_completed; 955 rq->mpwqe.umr_completed = 0; 956 } 957 958 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress; 959 960 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk)) 961 rq->stats->congst_umr++; 962 963 #define UMR_WQE_BULK (2) 964 if (likely(missing < UMR_WQE_BULK)) 965 return false; 966 967 if (rq->page_pool) 968 page_pool_nid_changed(rq->page_pool, numa_mem_id()); 969 970 head = rq->mpwqe.actual_wq_head; 971 i = missing; 972 do { 973 alloc_err = mlx5e_alloc_rx_mpwqe(rq, head); 974 975 if (unlikely(alloc_err)) 976 break; 977 head = mlx5_wq_ll_get_wqe_next_ix(wq, head); 978 } while (--i); 979 980 rq->mpwqe.umr_last_bulk = missing - i; 981 if (sq->doorbell_cseg) { 982 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); 983 sq->doorbell_cseg = NULL; 984 } 985 986 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; 987 rq->mpwqe.actual_wq_head = head; 988 989 /* If XSK Fill Ring doesn't have enough frames, report the 
error, so 990 * that one of the actions can be performed: 991 * 1. If need_wakeup is used, signal that the application has to kick 992 * the driver when it refills the Fill Ring. 993 * 2. Otherwise, busy poll by rescheduling the NAPI poll. 994 */ 995 if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool)) 996 return true; 997 998 return false; 999 } 1000 1001 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) 1002 { 1003 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); 1004 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || 1005 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); 1006 1007 tcp->check = 0; 1008 tcp->psh = get_cqe_lro_tcppsh(cqe); 1009 1010 if (tcp_ack) { 1011 tcp->ack = 1; 1012 tcp->ack_seq = cqe->lro.ack_seq_num; 1013 tcp->window = cqe->lro.tcp_win; 1014 } 1015 } 1016 1017 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, 1018 u32 cqe_bcnt) 1019 { 1020 struct ethhdr *eth = (struct ethhdr *)(skb->data); 1021 struct tcphdr *tcp; 1022 int network_depth = 0; 1023 __wsum check; 1024 __be16 proto; 1025 u16 tot_len; 1026 void *ip_p; 1027 1028 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); 1029 1030 tot_len = cqe_bcnt - network_depth; 1031 ip_p = skb->data + network_depth; 1032 1033 if (proto == htons(ETH_P_IP)) { 1034 struct iphdr *ipv4 = ip_p; 1035 1036 tcp = ip_p + sizeof(struct iphdr); 1037 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 1038 1039 ipv4->ttl = cqe->lro.min_ttl; 1040 ipv4->tot_len = cpu_to_be16(tot_len); 1041 ipv4->check = 0; 1042 ipv4->check = ip_fast_csum((unsigned char *)ipv4, 1043 ipv4->ihl); 1044 1045 mlx5e_lro_update_tcp_hdr(cqe, tcp); 1046 check = csum_partial(tcp, tcp->doff * 4, 1047 csum_unfold((__force __sum16)cqe->check_sum)); 1048 /* Almost done, don't forget the pseudo header */ 1049 tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, 1050 tot_len - sizeof(struct iphdr), 1051 IPPROTO_TCP, check); 1052 } else { 1053 u16 payload_len = tot_len - sizeof(struct ipv6hdr); 1054 struct ipv6hdr *ipv6 = ip_p; 1055 1056 tcp = ip_p + sizeof(struct ipv6hdr); 1057 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1058 1059 ipv6->hop_limit = cqe->lro.min_ttl; 1060 ipv6->payload_len = cpu_to_be16(payload_len); 1061 1062 mlx5e_lro_update_tcp_hdr(cqe, tcp); 1063 check = csum_partial(tcp, tcp->doff * 4, 1064 csum_unfold((__force __sum16)cqe->check_sum)); 1065 /* Almost done, don't forget the pseudo header */ 1066 tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, 1067 IPPROTO_TCP, check); 1068 } 1069 } 1070 1071 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) 1072 { 1073 struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; 1074 u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; 1075 1076 return page_address(last_head->page) + head_offset; 1077 } 1078 1079 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) 1080 { 1081 int udp_off = rq->hw_gro_data->fk.control.thoff; 1082 struct sk_buff *skb = rq->hw_gro_data->skb; 1083 struct udphdr *uh; 1084 1085 uh = (struct udphdr *)(skb->data + udp_off); 1086 uh->len = htons(skb->len - udp_off); 1087 1088 if (uh->check) 1089 uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr, 1090 ipv4->daddr, 0); 1091 1092 skb->csum_start = (unsigned char *)uh - skb->head; 1093 skb->csum_offset = offsetof(struct udphdr, check); 1094 1095 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; 1096 } 1097 1098 static void 
mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6) 1099 { 1100 int udp_off = rq->hw_gro_data->fk.control.thoff; 1101 struct sk_buff *skb = rq->hw_gro_data->skb; 1102 struct udphdr *uh; 1103 1104 uh = (struct udphdr *)(skb->data + udp_off); 1105 uh->len = htons(skb->len - udp_off); 1106 1107 if (uh->check) 1108 uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr, 1109 &ipv6->daddr, 0); 1110 1111 skb->csum_start = (unsigned char *)uh - skb->head; 1112 skb->csum_offset = offsetof(struct udphdr, check); 1113 1114 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; 1115 } 1116 1117 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 1118 struct tcphdr *skb_tcp_hd) 1119 { 1120 u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); 1121 struct tcphdr *last_tcp_hd; 1122 void *last_hd_addr; 1123 1124 last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); 1125 last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff; 1126 tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH); 1127 } 1128 1129 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4, 1130 struct mlx5_cqe64 *cqe, bool match) 1131 { 1132 int tcp_off = rq->hw_gro_data->fk.control.thoff; 1133 struct sk_buff *skb = rq->hw_gro_data->skb; 1134 struct tcphdr *tcp; 1135 1136 tcp = (struct tcphdr *)(skb->data + tcp_off); 1137 if (match) 1138 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); 1139 1140 tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr, 1141 ipv4->daddr, 0); 1142 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 1143 if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) 1144 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 1145 1146 skb->csum_start = (unsigned char *)tcp - skb->head; 1147 skb->csum_offset = offsetof(struct tcphdr, check); 1148 1149 if (tcp->cwr) 1150 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 1151 } 1152 1153 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6, 1154 struct mlx5_cqe64 *cqe, bool match) 1155 { 1156 int tcp_off = rq->hw_gro_data->fk.control.thoff; 1157 struct sk_buff *skb = rq->hw_gro_data->skb; 1158 struct tcphdr *tcp; 1159 1160 tcp = (struct tcphdr *)(skb->data + tcp_off); 1161 if (match) 1162 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); 1163 1164 tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr, 1165 &ipv6->daddr, 0); 1166 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 1167 skb->csum_start = (unsigned char *)tcp - skb->head; 1168 skb->csum_offset = offsetof(struct tcphdr, check); 1169 1170 if (tcp->cwr) 1171 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 1172 } 1173 1174 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) 1175 { 1176 bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)); 1177 struct sk_buff *skb = rq->hw_gro_data->skb; 1178 1179 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 1180 skb->ip_summed = CHECKSUM_PARTIAL; 1181 1182 if (is_ipv4) { 1183 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr); 1184 struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff); 1185 __be16 newlen = htons(skb->len - nhoff); 1186 1187 csum_replace2(&ipv4->check, ipv4->tot_len, newlen); 1188 ipv4->tot_len = newlen; 1189 1190 if (ipv4->protocol == IPPROTO_TCP) 1191 mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match); 1192 else 1193 mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4); 1194 } else { 1195 int nhoff = 
rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr); 1196 struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff); 1197 1198 ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6)); 1199 1200 if (ipv6->nexthdr == IPPROTO_TCP) 1201 mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match); 1202 else 1203 mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6); 1204 } 1205 } 1206 1207 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, 1208 struct sk_buff *skb) 1209 { 1210 u8 cht = cqe->rss_hash_type; 1211 int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 : 1212 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 : 1213 PKT_HASH_TYPE_NONE; 1214 skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); 1215 } 1216 1217 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, 1218 __be16 *proto) 1219 { 1220 *proto = ((struct ethhdr *)skb->data)->h_proto; 1221 *proto = __vlan_get_protocol(skb, *proto, network_depth); 1222 1223 if (*proto == htons(ETH_P_IP)) 1224 return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); 1225 1226 if (*proto == htons(ETH_P_IPV6)) 1227 return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); 1228 1229 return false; 1230 } 1231 1232 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) 1233 { 1234 int network_depth = 0; 1235 __be16 proto; 1236 void *ip; 1237 int rc; 1238 1239 if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto))) 1240 return; 1241 1242 ip = skb->data + network_depth; 1243 rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) : 1244 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip)); 1245 1246 rq->stats->ecn_mark += !!rc; 1247 } 1248 1249 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) 1250 { 1251 void *ip_p = skb->data + network_depth; 1252 1253 return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol : 1254 ((struct ipv6hdr *)ip_p)->nexthdr; 1255 } 1256 1257 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) 1258 1259 #define MAX_PADDING 8 1260 1261 static void 1262 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, 1263 struct mlx5e_rq_stats *stats) 1264 { 1265 stats->csum_complete_tail_slow++; 1266 skb->csum = csum_block_add(skb->csum, 1267 skb_checksum(skb, offset, len, 0), 1268 offset); 1269 } 1270 1271 static void 1272 tail_padding_csum(struct sk_buff *skb, int offset, 1273 struct mlx5e_rq_stats *stats) 1274 { 1275 u8 tail_padding[MAX_PADDING]; 1276 int len = skb->len - offset; 1277 void *tail; 1278 1279 if (unlikely(len > MAX_PADDING)) { 1280 tail_padding_csum_slow(skb, offset, len, stats); 1281 return; 1282 } 1283 1284 tail = skb_header_pointer(skb, offset, len, tail_padding); 1285 if (unlikely(!tail)) { 1286 tail_padding_csum_slow(skb, offset, len, stats); 1287 return; 1288 } 1289 1290 stats->csum_complete_tail++; 1291 skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); 1292 } 1293 1294 static void 1295 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto, 1296 struct mlx5e_rq_stats *stats) 1297 { 1298 struct ipv6hdr *ip6; 1299 struct iphdr *ip4; 1300 int pkt_len; 1301 1302 /* Fixup vlan headers, if any */ 1303 if (network_depth > ETH_HLEN) 1304 /* CQE csum is calculated from the IP header and does 1305 * not cover VLAN headers (if present). This will add 1306 * the checksum manually. 
1307 */ 1308 skb->csum = csum_partial(skb->data + ETH_HLEN, 1309 network_depth - ETH_HLEN, 1310 skb->csum); 1311 1312 /* Fixup tail padding, if any */ 1313 switch (proto) { 1314 case htons(ETH_P_IP): 1315 ip4 = (struct iphdr *)(skb->data + network_depth); 1316 pkt_len = network_depth + ntohs(ip4->tot_len); 1317 break; 1318 case htons(ETH_P_IPV6): 1319 ip6 = (struct ipv6hdr *)(skb->data + network_depth); 1320 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); 1321 break; 1322 default: 1323 return; 1324 } 1325 1326 if (likely(pkt_len >= skb->len)) 1327 return; 1328 1329 tail_padding_csum(skb, pkt_len, stats); 1330 } 1331 1332 static inline void mlx5e_handle_csum(struct net_device *netdev, 1333 struct mlx5_cqe64 *cqe, 1334 struct mlx5e_rq *rq, 1335 struct sk_buff *skb, 1336 bool lro) 1337 { 1338 struct mlx5e_rq_stats *stats = rq->stats; 1339 int network_depth = 0; 1340 __be16 proto; 1341 1342 if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) 1343 goto csum_none; 1344 1345 if (lro) { 1346 skb->ip_summed = CHECKSUM_UNNECESSARY; 1347 stats->csum_unnecessary++; 1348 return; 1349 } 1350 1351 /* True when explicitly set via priv flag, or XDP prog is loaded */ 1352 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) 1353 goto csum_unnecessary; 1354 1355 /* CQE csum doesn't cover padding octets in short ethernet 1356 * frames. And the pad field is appended prior to calculating 1357 * and appending the FCS field. 1358 * 1359 * Detecting these padded frames requires to verify and parse 1360 * IP headers, so we simply force all those small frames to be 1361 * CHECKSUM_UNNECESSARY even if they are not padded. 1362 */ 1363 if (short_frame(skb->len)) 1364 goto csum_unnecessary; 1365 1366 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { 1367 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) 1368 goto csum_unnecessary; 1369 1370 stats->csum_complete++; 1371 skb->ip_summed = CHECKSUM_COMPLETE; 1372 skb->csum = csum_unfold((__force __sum16)cqe->check_sum); 1373 1374 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state)) 1375 return; /* CQE csum covers all received bytes */ 1376 1377 /* csum might need some fixups ...*/ 1378 mlx5e_skb_csum_fixup(skb, network_depth, proto, stats); 1379 return; 1380 } 1381 1382 csum_unnecessary: 1383 if (likely((cqe->hds_ip_ext & CQE_L3_OK) && 1384 (cqe->hds_ip_ext & CQE_L4_OK))) { 1385 skb->ip_summed = CHECKSUM_UNNECESSARY; 1386 if (cqe_is_tunneled(cqe)) { 1387 skb->csum_level = 1; 1388 skb->encapsulation = 1; 1389 stats->csum_unnecessary_inner++; 1390 return; 1391 } 1392 stats->csum_unnecessary++; 1393 return; 1394 } 1395 csum_none: 1396 skb->ip_summed = CHECKSUM_NONE; 1397 stats->csum_none++; 1398 } 1399 1400 #define MLX5E_CE_BIT_MASK 0x80 1401 1402 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, 1403 u32 cqe_bcnt, 1404 struct mlx5e_rq *rq, 1405 struct sk_buff *skb) 1406 { 1407 u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; 1408 struct mlx5e_rq_stats *stats = rq->stats; 1409 struct net_device *netdev = rq->netdev; 1410 1411 skb->mac_len = ETH_HLEN; 1412 1413 mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); 1414 1415 if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) 1416 mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); 1417 1418 if (lro_num_seg > 1) { 1419 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); 1420 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); 1421 /* Subtract one since we already counted this as one 1422 * "regular" packet in mlx5e_complete_rx_cqe() 1423 */ 1424 stats->packets += 
lro_num_seg - 1; 1425 stats->lro_packets++; 1426 stats->lro_bytes += cqe_bcnt; 1427 } 1428 1429 if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) 1430 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, 1431 rq->clock, get_cqe_ts(cqe)); 1432 skb_record_rx_queue(skb, rq->ix); 1433 1434 if (likely(netdev->features & NETIF_F_RXHASH)) 1435 mlx5e_skb_set_hash(cqe, skb); 1436 1437 if (cqe_has_vlan(cqe)) { 1438 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1439 be16_to_cpu(cqe->vlan_info)); 1440 stats->removed_vlan_packets++; 1441 } 1442 1443 skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; 1444 1445 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); 1446 /* checking CE bit in cqe - MSB in ml_path field */ 1447 if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK)) 1448 mlx5e_enable_ecn(rq, skb); 1449 1450 skb->protocol = eth_type_trans(skb, netdev); 1451 1452 if (unlikely(mlx5e_skb_is_multicast(skb))) 1453 stats->mcast_packets++; 1454 } 1455 1456 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, 1457 struct mlx5_cqe64 *cqe, 1458 u32 cqe_bcnt, 1459 struct sk_buff *skb) 1460 { 1461 struct mlx5e_rq_stats *stats = rq->stats; 1462 1463 stats->packets++; 1464 stats->gro_packets++; 1465 stats->bytes += cqe_bcnt; 1466 stats->gro_bytes += cqe_bcnt; 1467 if (NAPI_GRO_CB(skb)->count != 1) 1468 return; 1469 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); 1470 skb_reset_network_header(skb); 1471 if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) { 1472 napi_gro_receive(rq->cq.napi, skb); 1473 rq->hw_gro_data->skb = NULL; 1474 } 1475 } 1476 1477 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, 1478 struct mlx5_cqe64 *cqe, 1479 u32 cqe_bcnt, 1480 struct sk_buff *skb) 1481 { 1482 struct mlx5e_rq_stats *stats = rq->stats; 1483 1484 stats->packets++; 1485 stats->bytes += cqe_bcnt; 1486 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); 1487 } 1488 1489 static inline 1490 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, 1491 u32 frag_size, u16 headroom, 1492 u32 cqe_bcnt) 1493 { 1494 struct sk_buff *skb = build_skb(va, frag_size); 1495 1496 if (unlikely(!skb)) { 1497 rq->stats->buff_alloc_err++; 1498 return NULL; 1499 } 1500 1501 skb_reserve(skb, headroom); 1502 skb_put(skb, cqe_bcnt); 1503 1504 return skb; 1505 } 1506 1507 static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, 1508 u32 len, struct xdp_buff *xdp) 1509 { 1510 xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq); 1511 xdp_prepare_buff(xdp, va, headroom, len, false); 1512 } 1513 1514 static struct sk_buff * 1515 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 1516 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) 1517 { 1518 struct mlx5e_dma_info *di = wi->di; 1519 u16 rx_headroom = rq->buff.headroom; 1520 struct xdp_buff xdp; 1521 struct sk_buff *skb; 1522 void *va, *data; 1523 u32 frag_size; 1524 1525 va = page_address(di->page) + wi->offset; 1526 data = va + rx_headroom; 1527 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); 1528 1529 dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset, 1530 frag_size, DMA_FROM_DEVICE); 1531 net_prefetchw(va); /* xdp_frame data area */ 1532 net_prefetch(data); 1533 1534 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp); 1535 if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp)) 1536 return NULL; /* page/packet was consumed by XDP */ 1537 1538 rx_headroom = xdp.data - xdp.data_hard_start; 1539 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); 1540 skb = mlx5e_build_linear_skb(rq, 
va, frag_size, rx_headroom, cqe_bcnt); 1541 if (unlikely(!skb)) 1542 return NULL; 1543 1544 /* queue up for recycling/reuse */ 1545 page_ref_inc(di->page); 1546 1547 return skb; 1548 } 1549 1550 static struct sk_buff * 1551 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 1552 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) 1553 { 1554 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; 1555 struct mlx5e_wqe_frag_info *head_wi = wi; 1556 u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt); 1557 u16 frag_headlen = headlen; 1558 u16 byte_cnt = cqe_bcnt - headlen; 1559 struct sk_buff *skb; 1560 1561 /* XDP is not supported in this configuration, as incoming packets 1562 * might spread among multiple pages. 1563 */ 1564 skb = napi_alloc_skb(rq->cq.napi, 1565 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); 1566 if (unlikely(!skb)) { 1567 rq->stats->buff_alloc_err++; 1568 return NULL; 1569 } 1570 1571 net_prefetchw(skb->data); 1572 1573 while (byte_cnt) { 1574 u16 frag_consumed_bytes = 1575 min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); 1576 1577 mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, 1578 frag_consumed_bytes, frag_info->frag_stride); 1579 byte_cnt -= frag_consumed_bytes; 1580 frag_headlen = 0; 1581 frag_info++; 1582 wi++; 1583 } 1584 1585 /* copy header */ 1586 mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset, 1587 headlen); 1588 /* skb linear part was allocated with headlen and aligned to long */ 1589 skb->tail += headlen; 1590 skb->len += headlen; 1591 1592 return skb; 1593 } 1594 1595 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1596 { 1597 struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; 1598 struct mlx5e_priv *priv = rq->priv; 1599 1600 if (cqe_syndrome_needs_recover(err_cqe->syndrome) && 1601 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { 1602 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); 1603 queue_work(priv->wq, &rq->recover_work); 1604 } 1605 } 1606 1607 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1608 { 1609 trigger_report(rq, cqe); 1610 rq->stats->wqe_err++; 1611 } 1612 1613 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1614 { 1615 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 1616 struct mlx5e_wqe_frag_info *wi; 1617 struct sk_buff *skb; 1618 u32 cqe_bcnt; 1619 u16 ci; 1620 1621 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); 1622 wi = get_frag(rq, ci); 1623 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); 1624 1625 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 1626 mlx5e_handle_rx_err_cqe(rq, cqe); 1627 goto free_wqe; 1628 } 1629 1630 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, 1631 mlx5e_skb_from_cqe_linear, 1632 mlx5e_skb_from_cqe_nonlinear, 1633 rq, cqe, wi, cqe_bcnt); 1634 if (!skb) { 1635 /* probably for XDP */ 1636 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 1637 /* do not return page to cache, 1638 * it will be returned on XDP_TX completion. 
1639 */ 1640 goto wq_cyc_pop; 1641 } 1642 goto free_wqe; 1643 } 1644 1645 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1646 1647 if (mlx5e_cqe_regb_chain(cqe)) 1648 if (!mlx5e_tc_update_skb(cqe, skb)) { 1649 dev_kfree_skb_any(skb); 1650 goto free_wqe; 1651 } 1652 1653 napi_gro_receive(rq->cq.napi, skb); 1654 1655 free_wqe: 1656 mlx5e_free_rx_wqe(rq, wi, true); 1657 wq_cyc_pop: 1658 mlx5_wq_cyc_pop(wq); 1659 } 1660 1661 #ifdef CONFIG_MLX5_ESWITCH 1662 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1663 { 1664 struct net_device *netdev = rq->netdev; 1665 struct mlx5e_priv *priv = netdev_priv(netdev); 1666 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1667 struct mlx5_eswitch_rep *rep = rpriv->rep; 1668 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 1669 struct mlx5e_wqe_frag_info *wi; 1670 struct sk_buff *skb; 1671 u32 cqe_bcnt; 1672 u16 ci; 1673 1674 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); 1675 wi = get_frag(rq, ci); 1676 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); 1677 1678 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 1679 mlx5e_handle_rx_err_cqe(rq, cqe); 1680 goto free_wqe; 1681 } 1682 1683 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, 1684 mlx5e_skb_from_cqe_linear, 1685 mlx5e_skb_from_cqe_nonlinear, 1686 rq, cqe, wi, cqe_bcnt); 1687 if (!skb) { 1688 /* probably for XDP */ 1689 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 1690 /* do not return page to cache, 1691 * it will be returned on XDP_TX completion. 1692 */ 1693 goto wq_cyc_pop; 1694 } 1695 goto free_wqe; 1696 } 1697 1698 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1699 1700 if (rep->vlan && skb_vlan_tag_present(skb)) 1701 skb_vlan_pop(skb); 1702 1703 mlx5e_rep_tc_receive(cqe, rq, skb); 1704 1705 free_wqe: 1706 mlx5e_free_rx_wqe(rq, wi, true); 1707 wq_cyc_pop: 1708 mlx5_wq_cyc_pop(wq); 1709 } 1710 1711 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1712 { 1713 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); 1714 u16 wqe_id = be16_to_cpu(cqe->wqe_id); 1715 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id]; 1716 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); 1717 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; 1718 u32 head_offset = wqe_offset & (PAGE_SIZE - 1); 1719 u32 page_idx = wqe_offset >> PAGE_SHIFT; 1720 struct mlx5e_rx_wqe_ll *wqe; 1721 struct mlx5_wq_ll *wq; 1722 struct sk_buff *skb; 1723 u16 cqe_bcnt; 1724 1725 wi->consumed_strides += cstrides; 1726 1727 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 1728 mlx5e_handle_rx_err_cqe(rq, cqe); 1729 goto mpwrq_cqe_out; 1730 } 1731 1732 if (unlikely(mpwrq_is_filler_cqe(cqe))) { 1733 struct mlx5e_rq_stats *stats = rq->stats; 1734 1735 stats->mpwqe_filler_cqes++; 1736 stats->mpwqe_filler_strides += cstrides; 1737 goto mpwrq_cqe_out; 1738 } 1739 1740 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); 1741 1742 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, 1743 mlx5e_skb_from_cqe_mpwrq_linear, 1744 mlx5e_skb_from_cqe_mpwrq_nonlinear, 1745 rq, wi, cqe_bcnt, head_offset, page_idx); 1746 if (!skb) 1747 goto mpwrq_cqe_out; 1748 1749 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1750 1751 mlx5e_rep_tc_receive(cqe, rq, skb); 1752 1753 mpwrq_cqe_out: 1754 if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) 1755 return; 1756 1757 wq = &rq->mpwqe.wq; 1758 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); 1759 mlx5e_free_rx_mpwqe(rq, wi, true); 1760 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); 1761 } 1762 1763 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = { 1764 .handle_rx_cqe = 
mlx5e_handle_rx_cqe_rep, 1765 .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, 1766 }; 1767 #endif 1768 1769 static void 1770 mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di, 1771 u32 data_bcnt, u32 data_offset) 1772 { 1773 net_prefetchw(skb->data); 1774 1775 while (data_bcnt) { 1776 u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt); 1777 unsigned int truesize; 1778 1779 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) 1780 truesize = pg_consumed_bytes; 1781 else 1782 truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); 1783 1784 mlx5e_add_skb_frag(rq, skb, di, data_offset, 1785 pg_consumed_bytes, truesize); 1786 1787 data_bcnt -= pg_consumed_bytes; 1788 data_offset = 0; 1789 di++; 1790 } 1791 } 1792 1793 static struct sk_buff * 1794 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 1795 u16 cqe_bcnt, u32 head_offset, u32 page_idx) 1796 { 1797 u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt); 1798 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; 1799 u32 frag_offset = head_offset + headlen; 1800 u32 byte_cnt = cqe_bcnt - headlen; 1801 struct mlx5e_dma_info *head_di = di; 1802 struct sk_buff *skb; 1803 1804 skb = napi_alloc_skb(rq->cq.napi, 1805 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); 1806 if (unlikely(!skb)) { 1807 rq->stats->buff_alloc_err++; 1808 return NULL; 1809 } 1810 1811 net_prefetchw(skb->data); 1812 1813 if (unlikely(frag_offset >= PAGE_SIZE)) { 1814 di++; 1815 frag_offset -= PAGE_SIZE; 1816 } 1817 1818 mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset); 1819 /* copy header */ 1820 mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen); 1821 /* skb linear part was allocated with headlen and aligned to long */ 1822 skb->tail += headlen; 1823 skb->len += headlen; 1824 1825 return skb; 1826 } 1827 1828 static struct sk_buff * 1829 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 1830 u16 cqe_bcnt, u32 head_offset, u32 page_idx) 1831 { 1832 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; 1833 u16 rx_headroom = rq->buff.headroom; 1834 u32 cqe_bcnt32 = cqe_bcnt; 1835 struct xdp_buff xdp; 1836 struct sk_buff *skb; 1837 void *va, *data; 1838 u32 frag_size; 1839 1840 /* Check packet size. 
Note LRO doesn't use linear SKB */ 1841 if (unlikely(cqe_bcnt > rq->hw_mtu)) { 1842 rq->stats->oversize_pkts_sw_drop++; 1843 return NULL; 1844 } 1845 1846 va = page_address(di->page) + head_offset; 1847 data = va + rx_headroom; 1848 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); 1849 1850 dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, 1851 frag_size, DMA_FROM_DEVICE); 1852 net_prefetchw(va); /* xdp_frame data area */ 1853 net_prefetch(data); 1854 1855 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp); 1856 if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) { 1857 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) 1858 __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */ 1859 return NULL; /* page/packet was consumed by XDP */ 1860 } 1861 1862 rx_headroom = xdp.data - xdp.data_hard_start; 1863 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); 1864 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32); 1865 if (unlikely(!skb)) 1866 return NULL; 1867 1868 /* queue up for recycling/reuse */ 1869 page_ref_inc(di->page); 1870 1871 return skb; 1872 } 1873 1874 static void 1875 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 1876 struct mlx5_cqe64 *cqe, u16 header_index) 1877 { 1878 struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; 1879 u16 head_offset = head->addr & (PAGE_SIZE - 1); 1880 u16 head_size = cqe->shampo.header_size; 1881 u16 rx_headroom = rq->buff.headroom; 1882 struct sk_buff *skb = NULL; 1883 void *hdr, *data; 1884 u32 frag_size; 1885 1886 hdr = page_address(head->page) + head_offset; 1887 data = hdr + rx_headroom; 1888 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); 1889 1890 if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { 1891 /* build SKB around header */ 1892 dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE); 1893 prefetchw(hdr); 1894 prefetch(data); 1895 skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size); 1896 1897 if (unlikely(!skb)) 1898 return; 1899 1900 /* queue up for recycling/reuse */ 1901 page_ref_inc(head->page); 1902 1903 } else { 1904 /* allocate SKB and copy header for large header */ 1905 rq->stats->gro_large_hds++; 1906 skb = napi_alloc_skb(rq->cq.napi, 1907 ALIGN(head_size, sizeof(long))); 1908 if (unlikely(!skb)) { 1909 rq->stats->buff_alloc_err++; 1910 return; 1911 } 1912 1913 prefetchw(skb->data); 1914 mlx5e_copy_skb_header(rq->pdev, skb, head, 1915 head_offset + rx_headroom, 1916 rx_headroom, head_size); 1917 /* skb linear part was allocated with headlen and aligned to long */ 1918 skb->tail += head_size; 1919 skb->len += head_size; 1920 } 1921 rq->hw_gro_data->skb = skb; 1922 NAPI_GRO_CB(skb)->count = 1; 1923 skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size; 1924 } 1925 1926 static void 1927 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz) 1928 { 1929 skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; 1930 unsigned int frag_size = skb_frag_size(last_frag); 1931 unsigned int frag_truesize; 1932 1933 frag_truesize = ALIGN(frag_size, BIT(log_stride_sz)); 1934 skb->truesize += frag_truesize - frag_size; 1935 } 1936 1937 static void 1938 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) 1939 { 1940 struct sk_buff *skb = rq->hw_gro_data->skb; 1941 struct mlx5e_rq_stats *stats = rq->stats; 1942 1943 stats->gro_skbs++; 1944 if (likely(skb_shinfo(skb)->nr_frags)) 1945 
static void
mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct mlx5e_rq_stats *stats = rq->stats;

	stats->gro_skbs++;
	if (likely(skb_shinfo(skb)->nr_frags))
		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
	if (NAPI_GRO_CB(skb)->count > 1)
		mlx5e_shampo_update_hdr(rq, cqe, match);
	napi_gro_receive(rq->cq.napi, skb);
	rq->hw_gro_data->skb = NULL;
}

static bool
mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;

	return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
}

static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	u64 addr = shampo->info[header_index].addr;

	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
		shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
		mlx5e_page_release(rq, &shampo->info[header_index], true);
	}
	bitmap_clear(shampo->bitmap, header_index, 1);
}

static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
	u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
	u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	struct sk_buff **skb = &rq->hw_gro_data->skb;
	bool flush = cqe->shampo.flush;
	bool match = cqe->shampo.match;
	struct mlx5e_rq_stats *stats = rq->stats;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5e_dma_info *di;
	struct mlx5e_mpw_info *wi;
	struct mlx5_wq_ll *wq;

	wi = &rq->mpwqe.info[wqe_id];
	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	stats->gro_match_packets += match;

	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
		match = false;
		mlx5e_shampo_flush_skb(rq, cqe, match);
	}

	if (!*skb) {
		mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
		if (unlikely(!*skb))
			goto free_hd_entry;
	} else {
		NAPI_GRO_CB(*skb)->count++;
		if (NAPI_GRO_CB(*skb)->count == 2 &&
		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
				    sizeof(struct iphdr);
			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);

			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
		}
	}

	di = &wi->umr.dma_info[page_idx];
	mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);

	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
	if (flush)
		mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
	mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
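
/* Regular (non-SHAMPO) multi-packet WQE completion: account the strides
 * consumed by this CQE, filter out error and filler CQEs, and build the
 * SKB via the linear or non-linear helper chosen at RQ setup. The WQE is
 * released back to the ring only once all of its strides are consumed.
 */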
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      rq, wi, cqe_bcnt, head_offset, page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (mlx5e_cqe_regb_chain(cqe))
		if (!mlx5e_tc_update_skb(cqe, skb)) {
			dev_kfree_skb_any(skb);
			goto mpwrq_cqe_out;
		}

	napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return 0;

	if (rq->cqd.left) {
		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
		if (work_done >= budget)
			goto out;
	}

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe) {
		if (unlikely(work_done))
			goto out;
		return 0;
	}

	do {
		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			work_done +=
				mlx5e_decompress_cqes_start(rq, cqwq,
							    budget - work_done);
			continue;
		}

		mlx5_cqwq_pop(cqwq);

		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
				rq, cqe);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

out:
	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
		mlx5e_shampo_flush_skb(rq, NULL, false);

	if (rcu_access_pointer(rq->xdp_prog))
		mlx5e_xdp_rx_poll_complete(rq);

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}

#ifdef CONFIG_MLX5_CORE_IPOIB

#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE           16

static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct hwtstamp_config *tstamp;
	struct mlx5e_rq_stats *stats;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	char *pseudo_header;
	u32 flags_rqpn;
	u32 qpn;
	u8 *dgid;
	u8 g;

	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);

	/* No mapping present, cannot process SKB. This might happen if a child
	 * interface is going down while having unprocessed CQEs on parent RQ
	 */
	if (unlikely(!netdev)) {
		/* TODO: add drop counters support */
		skb->dev = NULL;
		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
		return;
	}

	priv = mlx5i_epriv(netdev);
	tstamp = &priv->tstamp;
	stats = rq->stats;

	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
	g = (flags_rqpn >> 28) & 3;
	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
	if ((!g) || dgid[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	/* Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
		    MLX5_GID_SIZE) == 0)) {
		skb->dev = NULL;
		return;
	}

	skb_pull(skb, MLX5_IB_GRH_BYTES);

	skb->protocol = *((__be16 *)(skb->data));

	if (netdev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		stats->csum_complete++;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		stats->csum_none++;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
								  rq->clock, get_cqe_ts(cqe));
	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	/* 20 bytes of ipoib header and 4 for encap existing */
	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
	skb_reset_mac_header(skb);
	skb_pull(skb, MLX5_IPOIB_HARD_LEN);

	skb->dev = netdev;

	stats->packets++;
	stats->bytes += cqe_bcnt;
}

static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, cqe, wi, cqe_bcnt);
	if (!skb)
		goto wq_free_wqe;

	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	if (unlikely(!skb->dev)) {
		dev_kfree_skb_any(skb);
		goto wq_free_wqe;
	}
	napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
	mlx5_wq_cyc_pop(wq);
}

const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
};
#endif /* CONFIG_MLX5_CORE_IPOIB */

#ifdef CONFIG_MLX5_EN_IPSEC

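/* IPsec (Innova/FPGA offload) RX completion: same as the plain cyclic-RQ
 * path, except the SKB is passed through mlx5e_ipsec_handle_rx_skb()
 * before the CQE is completed; a NULL return means the IPsec offload
 * consumed or dropped the packet.
 */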
static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, cqe, wi, cqe_bcnt);
	if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
		goto wq_free_wqe;

	skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
	if (unlikely(!skb))
		goto wq_free_wqe;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
	mlx5_wq_cyc_pop(wq);
}

#endif /* CONFIG_MLX5_EN_IPSEC */

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5_core_dev *mdev = rq->mdev;
	struct mlx5e_priv *priv = rq->priv;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		if (mlx5_fpga_is_ipsec_device(mdev)) {
			netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
			return -EINVAL;
		}
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
				return -EINVAL;
			}
		} else {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
				return -EINVAL;
			}
		}

		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
		    priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			netdev_err(netdev, "RX handler of RQ is not set\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5e_priv *priv = netdev_priv(rq->netdev);
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct devlink_port *dl_port;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 trap_id;
	u16 ci;

	trap_id = get_cqe_flow_tag(cqe);
	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto free_wqe;
	}

	skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
	if (!skb)
		goto free_wqe;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	skb_push(skb, ETH_HLEN);

	dl_port = mlx5e_devlink_get_dl_port(priv);
	mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
	dev_kfree_skb_any(skb);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, false);
	mlx5_wq_cyc_pop(wq);
}

void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
			       mlx5e_skb_from_cqe_linear :
			       mlx5e_skb_from_cqe_nonlinear;
	rq->post_wqes = mlx5e_post_rx_wqes;
	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
}