/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/bitmap.h>
#include <linux/filter.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
#include "devlink.h"
#include "en/devlink.h"

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
	.handle_rx_cqe = mlx5e_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
				       u32 cqcc, void *data)
{
	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);

	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}

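/* CQE compression: a compression block starts with a "title" CQE carrying
 * the fields common to the whole block, followed by slots of mini CQEs
 * (MLX5_MINI_CQE_ARRAY_SIZE per CQE slot) holding only the per-packet
 * deltas. The helpers below expand each mini CQE back into the title CQE
 * before it is handed to the regular RX handlers.
 */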
static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
					 struct mlx5_cqwq *wq,
					 u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	struct mlx5_cqe64 *title = &cqd->title;

	mlx5e_read_cqe_slot(wq, cqcc, title);
	cqd->left = be32_to_cpu(title->byte_cnt);
	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
	rq->stats->cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
					    struct mlx5e_cq_decomp *cqd,
					    u32 cqcc)
{
	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
	cqd->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
	u32 cqcc = wq->cc;
	u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
	u32 wq_sz = mlx5_cqwq_get_size(wq);
	u32 ci_top = min_t(u32, wq_sz, ci + n);

	for (; ci < ci_top; ci++, n--) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

		cqe->op_own = op_own;
	}

	if (unlikely(ci == wq_sz)) {
		op_own = !op_own;
		for (ci = 0; ci < n; ci++) {
			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

			cqe->op_own = op_own;
		}
	}
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
					struct mlx5_cqwq *wq,
					u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
	struct mlx5_cqe64 *title = &cqd->title;

	title->byte_cnt = mini_cqe->byte_cnt;
	title->check_sum = mini_cqe->checksum;
	title->op_own &= 0xf0;
	title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);

	/* state bit set implies linked-list striding RQ wq type and
	 * HW stride index capability supported
	 */
	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
		title->wqe_counter = mini_cqe->stridx;
		return;
	}

	/* HW stride index capability not supported */
	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
	else
		cqd->wqe_counter =
			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
						struct mlx5_cqwq *wq,
						u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;

	mlx5e_decompress_cqe(rq, wq, cqcc);
	cqd->title.rss_hash_type = 0;
	cqd->title.rss_hash_result = 0;
}

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
					     struct mlx5_cqwq *wq,
					     int update_owner_only,
					     int budget_rem)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	u32 cqcc = wq->cc + update_owner_only;
	u32 cqe_count;
	u32 i;

	cqe_count = min_t(u32, cqd->left, budget_rem);

	for (i = update_owner_only; i < cqe_count;
	     i++, cqd->mini_arr_idx++, cqcc++) {
		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);

		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
				rq, &cqd->title);
	}
	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
	wq->cc = cqcc;
	cqd->left -= cqe_count;
	rq->stats->cqe_compress_pkts += cqe_count;

	return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
					      struct mlx5_cqwq *wq,
					      int budget_rem)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	u32 cc = wq->cc;

	mlx5e_read_title_slot(rq, wq, cc);
	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
	mlx5e_decompress_cqe(rq, wq, cc);
	INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
			mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
			rq, &cqd->title);
	cqd->mini_arr_idx++;

	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}

static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;
	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
	struct mlx5e_rq_stats *stats = rq->stats;

	if (tail_next == cache->head) {
		stats->cache_full++;
		return false;
	}

	if (!dev_page_is_reusable(page)) {
		stats->cache_waive++;
		return false;
	}

	cache->page_cache[cache->tail] = page;
	cache->tail = tail_next;
	return true;
}

static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;
	struct mlx5e_rq_stats *stats = rq->stats;
	dma_addr_t addr;

	if (unlikely(cache->head == cache->tail)) {
		stats->cache_empty++;
		return false;
	}

	if (page_ref_count(cache->page_cache[cache->head]) != 1) {
		stats->cache_busy++;
		return false;
	}

	au->page = cache->page_cache[cache->head];
	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
	stats->cache_reuse++;

	addr = page_pool_get_dma_addr(au->page);
	/* Non-XSK always uses PAGE_SIZE. */
	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
	return true;
}

static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
	dma_addr_t addr;

	if (mlx5e_rx_cache_get(rq, au))
		return 0;

	au->page = page_pool_dev_alloc_pages(rq->page_pool);
	if (unlikely(!au->page))
		return -ENOMEM;

	/* Non-XSK always uses PAGE_SIZE. */
	addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
				  rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
		page_pool_recycle_direct(rq->page_pool, au->page);
		au->page = NULL;
		return -ENOMEM;
	}
	page_pool_set_dma_addr(au->page, addr);

	return 0;
}

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
}

void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
{
	if (likely(recycle)) {
		if (mlx5e_rx_cache_put(rq, page))
			return;

		mlx5e_page_dma_unmap(rq, page);
		page_pool_recycle_direct(rq->page_pool, page);
	} else {
		mlx5e_page_dma_unmap(rq, page);
		page_pool_release_page(rq->page_pool, page);
		put_page(page);
	}
}

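/* Legacy (cyclic) RQ: a WQE is built of rq->wqe.info.num_frags fragments,
 * and several fragments may share one page (alloc unit). Only the fragment
 * at offset 0 allocates the unit, and only the one marked last_in_page
 * releases it.
 */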
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
				    struct mlx5e_wqe_frag_info *frag)
{
	int err = 0;

	if (!frag->offset)
		/* On the first frag (offset == 0), replenish the page (the
		 * whole alloc_unit, actually). Other frags that point to the
		 * same alloc_unit (with a different offset) should just use
		 * it without replenishing again by themselves.
		 */
		err = mlx5e_page_alloc_pool(rq, frag->au);

	return err;
}

static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
				     struct mlx5e_wqe_frag_info *frag,
				     bool recycle)
{
	if (frag->last_in_page)
		mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
}

static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
{
	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
}

static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
			      u16 ix)
{
	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
	int err;
	int i;

	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
		dma_addr_t addr;
		u16 headroom;

		err = mlx5e_get_rx_frag(rq, frag);
		if (unlikely(err))
			goto free_frags;

		headroom = i == 0 ? rq->buff.headroom : 0;
		addr = page_pool_get_dma_addr(frag->au->page);
		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
	}

	return 0;

free_frags:
	while (--i >= 0)
		mlx5e_put_rx_frag(rq, --frag, true);

	return err;
}

static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
				     struct mlx5e_wqe_frag_info *wi,
				     bool recycle)
{
	int i;

	if (rq->xsk_pool) {
		/* The `recycle` parameter is ignored, and the page is always
		 * put into the Reuse Ring, because there is no way to return
		 * the page to userspace when the interface goes down.
		 */
		xsk_buff_free(wi->au->xsk);
		return;
	}

	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
		mlx5e_put_rx_frag(rq, wi, recycle);
}

static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

	mlx5e_free_rx_wqe(rq, wi, false);
}

static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	int i;

	for (i = 0; i < wqe_bulk; i++) {
		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
		struct mlx5e_rx_wqe_cyc *wqe;

		wqe = mlx5_wq_cyc_get_wqe(wq, j);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
			break;
	}

	return i;
}

static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
		   union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
		   unsigned int truesize)
{
	dma_addr_t addr = page_pool_get_dma_addr(au->page);

	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
				DMA_FROM_DEVICE);
	page_ref_inc(au->page);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			au->page, frag_offset, len, truesize);
}

static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
		      struct page *page, dma_addr_t addr,
		      int offset_from, int dma_offset, u32 headlen)
{
	const void *from = page_address(page) + offset_from;
	/* Aligning len to sizeof(long) optimizes memcpy performance */
	unsigned int len = ALIGN(headlen, sizeof(long));

	dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, from, len);
}

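/* Striding RQ (MPWQE): pages whose bit is set in wi->xdp_xmit_bitmap were
 * handed to XDP_TX/XDP_REDIRECT and are released on XDP completion rather
 * than here. A full bitmap (the common case for AF_XDP) leaves nothing to
 * free.
 */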
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
	union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
	bool no_xdp_xmit;
	int i;

	/* A common case for AF_XDP. */
	if (bitmap_full(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe))
		return;

	no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);

	if (rq->xsk_pool) {
		/* The `recycle` parameter is ignored, and the page is always
		 * put into the Reuse Ring, because there is no way to return
		 * the page to userspace when the interface goes down.
		 */
		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
				xsk_buff_free(alloc_units[i].xsk);
	} else {
		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
				mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
	}
}

static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
{
	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

	do {
		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);

		mlx5_wq_ll_push(wq, next_wqe_index);
	} while (--n);

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);
}

/* Returns the length of the contiguous run of free (clear) bits in the
 * bitmap, starting at @first and capped at @len, wrapping around the end of
 * the bitmap if necessary.
 */
static int bitmap_find_window(unsigned long *bitmap, int len,
			      int bitmap_size, int first)
{
	int next_one, count;

	next_one = find_next_bit(bitmap, bitmap_size, first);
	if (next_one == bitmap_size) {
		if (bitmap_size - first >= len)
			return len;
		next_one = find_next_bit(bitmap, bitmap_size, 0);
		count = next_one + bitmap_size - first;
	} else {
		count = next_one - first;
	}

	return min(len, count);
}

static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
			  __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
{
	memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
	umr_wqe->ctrl.opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
			    MLX5_OPCODE_UMR);
	umr_wqe->ctrl.umr_mkey = key;
	umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
					   | MLX5E_KLM_UMR_DS_CNT(klm_len));
	umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
	umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
	umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

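/* SHAMPO (HW GRO): packet headers are written into a dedicated header
 * buffer that is mapped through KLM UMR WQEs posted on the ICOSQ. Each KLM
 * entry covers one MLX5E_RX_MAX_HEAD-sized header slot, and entries are
 * posted in MLX5_UMR_KLM_ALIGNMENT-aligned batches.
 */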
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
				     struct mlx5e_icosq *sq,
				     u16 klm_entries, u16 index)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
	u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
	struct page *page = shampo->last_page;
	u64 addr = shampo->last_addr;
	struct mlx5e_dma_info *dma_info;
	struct mlx5e_umr_wqe *umr_wqe;
	int headroom, i;

	headroom = rq->buff.headroom;
	new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
	entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
	wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
	pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
	umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);

	for (i = 0; i < entries; i++, index++) {
		dma_info = &shampo->info[index];
		if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
					 MLX5_UMR_KLM_ALIGNMENT))
			goto update_klm;
		header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
			MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
		if (!(header_offset & (PAGE_SIZE - 1))) {
			union mlx5e_alloc_unit au;

			err = mlx5e_page_alloc_pool(rq, &au);
			if (unlikely(err))
				goto err_unmap;
			page = dma_info->page = au.page;
			addr = dma_info->addr = page_pool_get_dma_addr(au.page);
		} else {
			dma_info->addr = addr + header_offset;
			dma_info->page = page;
		}

update_klm:
		umr_wqe->inline_klms[i].bcount =
			cpu_to_be32(MLX5E_RX_MAX_HEAD);
		umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
		umr_wqe->inline_klms[i].va =
			cpu_to_be64(dma_info->addr + headroom);
	}

	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
		.wqe_type	= MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
		.num_wqebbs	= wqe_bbs,
		.shampo.len	= new_entries,
	};

	shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
	shampo->last_page = page;
	shampo->last_addr = addr;
	sq->pc += wqe_bbs;
	sq->doorbell_cseg = &umr_wqe->ctrl;

	return 0;

err_unmap:
	while (--i >= 0) {
		dma_info = &shampo->info[--index];
		if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
			dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
			mlx5e_page_release_dynamic(rq, dma_info->page, true);
		}
	}
	rq->stats->buff_alloc_err++;
	return err;
}

static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	u16 klm_entries, num_wqe, index, entries_before;
	struct mlx5e_icosq *sq = rq->icosq;
	int i, err, max_klm_entries, len;

	max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
	klm_entries = bitmap_find_window(shampo->bitmap,
					 shampo->hd_per_wqe,
					 shampo->hd_per_wq, shampo->pi);
	if (!klm_entries)
		return 0;

	klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
	index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
	entries_before = shampo->hd_per_wq - index;

	if (unlikely(entries_before < klm_entries))
		num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
			  DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
	else
		num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);

	for (i = 0; i < num_wqe; i++) {
		len = (klm_entries > max_klm_entries) ? max_klm_entries :
							klm_entries;
		if (unlikely(index + len > shampo->hd_per_wq))
			len = shampo->hd_per_wq - index;
		err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
		if (unlikely(err))
			return err;
		index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
		klm_entries -= len;
	}

	return 0;
}

static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
	union mlx5e_alloc_unit *au = &wi->alloc_units[0];
	struct mlx5e_icosq *sq = rq->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_umr_wqe *umr_wqe;
	u32 offset; /* 17-bit value with MTT. */
	u16 pi;
	int err;
	int i;

	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
		err = mlx5e_alloc_rx_hd_mpwqe(rq);
		if (unlikely(err))
			goto err;
	}

	pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));

	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) {
		dma_addr_t addr;

		err = mlx5e_page_alloc_pool(rq, au);
		if (unlikely(err))
			goto err_unmap;
		addr = page_pool_get_dma_addr(au->page);
		umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
			.ptag = cpu_to_be64(addr | MLX5_EN_WR),
		};
	}

	bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
	wi->consumed_strides = 0;

	umr_wqe->ctrl.opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
			    MLX5_OPCODE_UMR);

	offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
	umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);

	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
		.num_wqebbs = rq->mpwqe.umr_wqebbs,
		.umr.rq     = rq,
	};

	sq->pc += rq->mpwqe.umr_wqebbs;

	sq->doorbell_cseg = &umr_wqe->ctrl;

	return 0;

err_unmap:
	while (--i >= 0) {
		au--;
		mlx5e_page_release_dynamic(rq, au->page, true);
	}

err:
	rq->stats->buff_alloc_err++;

	return err;
}

/* This function is responsible for deallocating the SHAMPO header buffer.
 * close == true specifies that we are in the middle of closing the RQ, so we
 * go over all the entries and free those still marked as in use; otherwise
 * we only go over the specific range inside the header buffer that is no
 * longer in use.
 */
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	int hd_per_wq = shampo->hd_per_wq;
	struct page *deleted_page = NULL;
	struct mlx5e_dma_info *hd_info;
	int i, index = start;

	for (i = 0; i < len; i++, index++) {
		if (index == hd_per_wq)
			index = 0;

		if (close && !test_bit(index, shampo->bitmap))
			continue;

		hd_info = &shampo->info[index];
		hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
		if (hd_info->page != deleted_page) {
			deleted_page = hd_info->page;
			mlx5e_page_release_dynamic(rq, hd_info->page, false);
		}
	}

	if (start + len > hd_per_wq) {
		len -= hd_per_wq - start;
		bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
		start = 0;
	}

	bitmap_clear(shampo->bitmap, start, len);
}

static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
	/* Don't recycle, this function is called on rq/netdev close */
	mlx5e_free_rx_mpwqe(rq, wi, false);
}

INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	int wqe_bulk, count;
	bool busy = false;
	u16 head;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return false;

	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
		return false;

	if (rq->page_pool)
		page_pool_nid_changed(rq->page_pool, numa_mem_id());

	wqe_bulk = mlx5_wq_cyc_missing(wq);
	head = mlx5_wq_cyc_get_head(wq);

	/* Don't allow any newly allocated WQEs to share the same page with old
	 * WQEs that aren't completed yet. Stop earlier.
	 */
	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;

	if (!rq->xsk_pool)
		count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
	else if (likely(!rq->xsk_pool->dma_need_sync))
		count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
	else
		/* If dma_need_sync is true, it's more efficient to call
		 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
		 * because the latter does the same check and returns only one
		 * frame.
		 */
		count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);

	mlx5_wq_cyc_push_n(wq, count);
	if (unlikely(count != wqe_bulk)) {
		rq->stats->buff_alloc_err++;
		busy = true;
	}

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_cyc_update_db_record(wq);

	return busy;
}

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
{
	u16 sqcc;

	sqcc = sq->cc;

	while (sqcc != sq->pc) {
		struct mlx5e_icosq_wqe_info *wi;
		u16 ci;

		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];
		sqcc += wi->num_wqebbs;
#ifdef CONFIG_MLX5_EN_TLS
		switch (wi->wqe_type) {
		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
			mlx5e_ktls_handle_ctx_completion(wi);
			break;
		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
			mlx5e_ktls_handle_get_psv_completion(wi, sq);
			break;
		}
#endif
	}
	sq->cc = sqcc;
}

static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
				       struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
	struct mlx5e_shampo_hd *shampo;
	/* assume 1:1 relationship between RQ and icosq */
	struct mlx5e_rq *rq = &c->rq;
	int end, from, len = umr.len;

	shampo = rq->mpwqe.shampo;
	end = shampo->hd_per_wq;
	from = shampo->ci;
	if (from + len > shampo->hd_per_wq) {
		len -= end - from;
		bitmap_set(shampo->bitmap, from, end - from);
		from = 0;
	}

	bitmap_set(shampo->bitmap, from, len);
	shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
}

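/* Poll completions of the internal control operations SQ (ICOSQ): UMR and
 * NOP WQEs complete here, updating the RX-side bookkeeping (MPWQE UMR
 * counters, the SHAMPO header bitmap and, with kTLS, PSV operations).
 */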
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
	struct mlx5_cqe64 *cqe;
	u16 sqcc;
	int i;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return 0;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (likely(!cqe))
		return 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_icosq_wqe_info *wi;
			u16 ci;

			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];
			sqcc += wi->num_wqebbs;

			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
				netdev_WARN_ONCE(cq->netdev,
						 "Bad OP in ICOSQ CQE: 0x%x\n",
						 get_cqe_opcode(cqe));
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
					queue_work(cq->priv->wq, &sq->recover_work);
				break;
			}

			switch (wi->wqe_type) {
			case MLX5E_ICOSQ_WQE_UMR_RX:
				wi->umr.rq->mpwqe.umr_completed++;
				break;
			case MLX5E_ICOSQ_WQE_NOP:
				break;
			case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
				mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
				break;
#ifdef CONFIG_MLX5_EN_TLS
			case MLX5E_ICOSQ_WQE_UMR_TLS:
				break;
			case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
				mlx5e_ktls_handle_ctx_completion(wi);
				break;
			case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
				mlx5e_ktls_handle_get_psv_completion(wi, sq);
				break;
#endif
			default:
				netdev_WARN_ONCE(cq->netdev,
						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
						 wi->wqe_type);
			}
		} while (!last_wqe);
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	sq->cc = sqcc;

	mlx5_cqwq_update_db_record(&cq->wq);

	return i;
}

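/* Refill the striding RQ: WQEs whose memory-registration UMR has completed
 * are pushed to the NIC, then new UMR WQEs are posted for the missing ones.
 * UMRs still in flight are accounted in rq->mpwqe.umr_in_progress.
 */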
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
	u8 umr_completed = rq->mpwqe.umr_completed;
	struct mlx5e_icosq *sq = rq->icosq;
	int alloc_err = 0;
	u8 missing, i;
	u16 head;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return false;

	if (umr_completed) {
		mlx5e_post_rx_mpwqe(rq, umr_completed);
		rq->mpwqe.umr_in_progress -= umr_completed;
		rq->mpwqe.umr_completed = 0;
	}

	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;

	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
		rq->stats->congst_umr++;

	if (likely(missing < rq->mpwqe.min_wqe_bulk))
		return false;

	if (rq->page_pool)
		page_pool_nid_changed(rq->page_pool, numa_mem_id());

	head = rq->mpwqe.actual_wq_head;
	i = missing;
	do {
		alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
					   mlx5e_alloc_rx_mpwqe(rq, head);

		if (unlikely(alloc_err))
			break;
		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
	} while (--i);

	rq->mpwqe.umr_last_bulk = missing - i;
	if (sq->doorbell_cseg) {
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
		sq->doorbell_cseg = NULL;
	}

	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
	rq->mpwqe.actual_wq_head = head;

	/* If XSK Fill Ring doesn't have enough frames, report the error, so
	 * that one of the actions can be performed:
	 * 1. If need_wakeup is used, signal that the application has to kick
	 * the driver when it refills the Fill Ring.
	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
	 */
	if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
		return true;

	return false;
}

static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
{
	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
		     (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);

	tcp->check = 0;
	tcp->psh = get_cqe_lro_tcppsh(cqe);

	if (tcp_ack) {
		tcp->ack = 1;
		tcp->ack_seq = cqe->lro.ack_seq_num;
		tcp->window = cqe->lro.tcp_win;
	}
}

static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
				 u32 cqe_bcnt)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct tcphdr *tcp;
	int network_depth = 0;
	__wsum check;
	__be16 proto;
	u16 tot_len;
	void *ip_p;

	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

	tot_len = cqe_bcnt - network_depth;
	ip_p = skb->data + network_depth;

	if (proto == htons(ETH_P_IP)) {
		struct iphdr *ipv4 = ip_p;

		tcp = ip_p + sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

		ipv4->ttl = cqe->lro.min_ttl;
		ipv4->tot_len = cpu_to_be16(tot_len);
		ipv4->check = 0;
		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
					   ipv4->ihl);

		mlx5e_lro_update_tcp_hdr(cqe, tcp);
		check = csum_partial(tcp, tcp->doff * 4,
				     csum_unfold((__force __sum16)cqe->check_sum));
		/* Almost done, don't forget the pseudo header */
		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
					       tot_len - sizeof(struct iphdr),
					       IPPROTO_TCP, check);
	} else {
		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
		struct ipv6hdr *ipv6 = ip_p;

		tcp = ip_p + sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

		ipv6->hop_limit = cqe->lro.min_ttl;
		ipv6->payload_len = cpu_to_be16(payload_len);

		mlx5e_lro_update_tcp_hdr(cqe, tcp);
		check = csum_partial(tcp, tcp->doff * 4,
				     csum_unfold((__force __sum16)cqe->check_sum));
		/* Almost done, don't forget the pseudo header */
		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
					     IPPROTO_TCP, check);
	}
}

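/* SHAMPO header-update helpers: when an aggregated GRO SKB is flushed, the
 * IP/TCP/UDP headers of its first packet are rewritten to describe the
 * merged super-packet (lengths, checksums and GSO flags).
 */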
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
	struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
	u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;

	return page_address(last_head->page) + head_offset;
}

static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
{
	int udp_off = rq->hw_gro_data->fk.control.thoff;
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct udphdr *uh;

	uh = (struct udphdr *)(skb->data + udp_off);
	uh->len = htons(skb->len - udp_off);

	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
					  ipv4->daddr, 0);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);

	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
}

static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
{
	int udp_off = rq->hw_gro_data->fk.control.thoff;
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct udphdr *uh;

	uh = (struct udphdr *)(skb->data + udp_off);
	uh->len = htons(skb->len - udp_off);

	if (uh->check)
		uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
					  &ipv6->daddr, 0);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);

	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
}

static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
					      struct tcphdr *skb_tcp_hd)
{
	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
	struct tcphdr *last_tcp_hd;
	void *last_hd_addr;

	last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
	last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
	tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
}

static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
					     struct mlx5_cqe64 *cqe, bool match)
{
	int tcp_off = rq->hw_gro_data->fk.control.thoff;
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct tcphdr *tcp;

	tcp = (struct tcphdr *)(skb->data + tcp_off);
	if (match)
		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);

	tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
				   ipv4->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	skb->csum_start = (unsigned char *)tcp - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (tcp->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
}

static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
					     struct mlx5_cqe64 *cqe, bool match)
{
	int tcp_off = rq->hw_gro_data->fk.control.thoff;
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct tcphdr *tcp;

	tcp = (struct tcphdr *)(skb->data + tcp_off);
	if (match)
		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);

	tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
				   &ipv6->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
	skb->csum_start = (unsigned char *)tcp - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (tcp->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
}

static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
	bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
	struct sk_buff *skb = rq->hw_gro_data->skb;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb->ip_summed = CHECKSUM_PARTIAL;

	if (is_ipv4) {
		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
		struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
		__be16 newlen = htons(skb->len - nhoff);

		csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
		ipv4->tot_len = newlen;

		if (ipv4->protocol == IPPROTO_TCP)
			mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
		else
			mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
	} else {
		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
		struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);

		ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));

		if (ipv6->nexthdr == IPPROTO_TCP)
			mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
		else
			mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
	}
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
				      struct sk_buff *skb)
{
	u8 cht = cqe->rss_hash_type;
	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
					    PKT_HASH_TYPE_NONE;
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
					__be16 *proto)
{
	*proto = ((struct ethhdr *)skb->data)->h_proto;
	*proto = __vlan_get_protocol(skb, *proto, network_depth);

	if (*proto == htons(ETH_P_IP))
		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));

	if (*proto == htons(ETH_P_IPV6))
		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));

	return false;
}

static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	int network_depth = 0;
	__be16 proto;
	void *ip;
	int rc;

	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
		return;

	ip = skb->data + network_depth;
	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
					   IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));

	rq->stats->ecn_mark += !!rc;
}

static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
	void *ip_p = skb->data + network_depth;

	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
					    ((struct ipv6hdr *)ip_p)->nexthdr;
}

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

#define MAX_PADDING 8

static void
tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
		       struct mlx5e_rq_stats *stats)
{
	stats->csum_complete_tail_slow++;
	skb->csum = csum_block_add(skb->csum,
				   skb_checksum(skb, offset, len, 0),
				   offset);
}

static void
tail_padding_csum(struct sk_buff *skb, int offset,
		  struct mlx5e_rq_stats *stats)
{
	u8 tail_padding[MAX_PADDING];
	int len = skb->len - offset;
	void *tail;

	if (unlikely(len > MAX_PADDING)) {
		tail_padding_csum_slow(skb, offset, len, stats);
		return;
	}

	tail = skb_header_pointer(skb, offset, len, tail_padding);
	if (unlikely(!tail)) {
		tail_padding_csum_slow(skb, offset, len, stats);
		return;
	}

	stats->csum_complete_tail++;
	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
}

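/* CHECKSUM_COMPLETE fixups: the CQE checksum starts at the IP header, so a
 * VLAN tag (if present) has to be added back in, and any tail padding that
 * lies beyond the IP datagram has to be folded into skb->csum as well.
 */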
static void
mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
		     struct mlx5e_rq_stats *stats)
{
	struct ipv6hdr *ip6;
	struct iphdr *ip4;
	int pkt_len;

	/* Fixup vlan headers, if any */
	if (network_depth > ETH_HLEN)
		/* CQE csum is calculated from the IP header and does
		 * not cover VLAN headers (if present). This will add
		 * the checksum manually.
		 */
		skb->csum = csum_partial(skb->data + ETH_HLEN,
					 network_depth - ETH_HLEN,
					 skb->csum);

	/* Fixup tail padding, if any */
	switch (proto) {
	case htons(ETH_P_IP):
		ip4 = (struct iphdr *)(skb->data + network_depth);
		pkt_len = network_depth + ntohs(ip4->tot_len);
		break;
	case htons(ETH_P_IPV6):
		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
		break;
	default:
		return;
	}

	if (likely(pkt_len >= skb->len))
		return;

	tail_padding_csum(skb, pkt_len, stats);
}

static inline void mlx5e_handle_csum(struct net_device *netdev,
				     struct mlx5_cqe64 *cqe,
				     struct mlx5e_rq *rq,
				     struct sk_buff *skb,
				     bool lro)
{
	struct mlx5e_rq_stats *stats = rq->stats;
	int network_depth = 0;
	__be16 proto;

	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		goto csum_none;

	if (lro) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		stats->csum_unnecessary++;
		return;
	}

	/* True when explicitly set via priv flag, or XDP prog is loaded */
	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
	    get_cqe_tls_offload(cqe))
		goto csum_unnecessary;

	/* CQE csum doesn't cover padding octets in short ethernet
	 * frames. And the pad field is appended prior to calculating
	 * and appending the FCS field.
	 *
	 * Detecting these padded frames requires verifying and parsing
	 * IP headers, so we simply force all those small frames to be
	 * CHECKSUM_UNNECESSARY even if they are not padded.
	 */
	if (short_frame(skb->len))
		goto csum_unnecessary;

	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
			goto csum_unnecessary;

		stats->csum_complete++;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);

		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
			return; /* CQE csum covers all received bytes */

		/* csum might need some fixups ... */
		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
		return;
	}

csum_unnecessary:
	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
		   (cqe->hds_ip_ext & CQE_L4_OK))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (cqe_is_tunneled(cqe)) {
			skb->csum_level = 1;
			skb->encapsulation = 1;
			stats->csum_unnecessary_inner++;
			return;
		}
		stats->csum_unnecessary++;
		return;
	}
csum_none:
	skb->ip_summed = CHECKSUM_NONE;
	stats->csum_none++;
}

#define MLX5E_CE_BIT_MASK 0x80

static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
				      u32 cqe_bcnt,
				      struct mlx5e_rq *rq,
				      struct sk_buff *skb)
{
	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	struct mlx5e_rq_stats *stats = rq->stats;
	struct net_device *netdev = rq->netdev;

	skb->mac_len = ETH_HLEN;

	if (unlikely(get_cqe_tls_offload(cqe)))
		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);

	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);

	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);

	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
		/* Subtract one since we already counted this as one
		 * "regular" packet in mlx5e_complete_rx_cqe()
		 */
		stats->packets += lro_num_seg - 1;
		stats->lro_packets++;
		stats->lro_bytes += cqe_bcnt;
	}

	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
								  rq->clock, get_cqe_ts(cqe));
	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	if (cqe_has_vlan(cqe)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));
		stats->removed_vlan_packets++;
	}

	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;

	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
	/* checking CE bit in cqe - MSB in ml_path field */
	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
		mlx5e_enable_ecn(rq, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (unlikely(mlx5e_skb_is_multicast(skb)))
		stats->mcast_packets++;
}

static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	stats->packets++;
	stats->gro_packets++;
	stats->bytes += cqe_bcnt;
	stats->gro_bytes += cqe_bcnt;
	if (NAPI_GRO_CB(skb)->count != 1)
		return;
	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
	skb_reset_network_header(skb);
	if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
		napi_gro_receive(rq->cq.napi, skb);
		rq->hw_gro_data->skb = NULL;
	}
}

static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	stats->packets++;
	stats->bytes += cqe_bcnt;
	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}

static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
				       u32 frag_size, u16 headroom,
				       u32 cqe_bcnt, u32 metasize)
{
	struct sk_buff *skb = build_skb(va, frag_size);

	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	skb_reserve(skb, headroom);
	skb_put(skb, cqe_bcnt);

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}

static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
				u32 len, struct xdp_buff *xdp)
{
	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, va, headroom, len, true);
}

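/* Build an SKB for a completion on the legacy RQ. An attached XDP program,
 * if any, runs on the frame first; if it consumes the frame (XDP_TX,
 * XDP_REDIRECT or XDP_DROP), no SKB is built and NULL is returned.
 */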
static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
			  u32 cqe_bcnt)
{
	union mlx5e_alloc_unit *au = wi->au;
	u16 rx_headroom = rq->buff.headroom;
	struct bpf_prog *prog;
	struct sk_buff *skb;
	u32 metasize = 0;
	void *va, *data;
	dma_addr_t addr;
	u32 frag_size;

	va = page_address(au->page) + wi->offset;
	data = va + rx_headroom;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);

	addr = page_pool_get_dma_addr(au->page);
	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
				      frag_size, DMA_FROM_DEVICE);
	net_prefetch(data);

	prog = rcu_dereference(rq->xdp_prog);
	if (prog) {
		struct xdp_buff xdp;

		net_prefetchw(va); /* xdp_frame data area */
		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
		if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
			return NULL; /* page/packet was consumed by XDP */

		rx_headroom = xdp.data - xdp.data_hard_start;
		metasize = xdp.data - xdp.data_meta;
		cqe_bcnt = xdp.data_end - xdp.data;
	}
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
	if (unlikely(!skb))
		return NULL;

	/* queue up for recycling/reuse */
	page_ref_inc(au->page);

	return skb;
}

static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
			     u32 cqe_bcnt)
{
	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
	struct mlx5e_wqe_frag_info *head_wi = wi;
	union mlx5e_alloc_unit *au = wi->au;
	u16 rx_headroom = rq->buff.headroom;
	struct skb_shared_info *sinfo;
	u32 frag_consumed_bytes;
	struct bpf_prog *prog;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	dma_addr_t addr;
	u32 truesize;
	void *va;

	va = page_address(au->page) + wi->offset;
	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);

	addr = page_pool_get_dma_addr(au->page);
	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
				      rq->buff.frame0_sz, DMA_FROM_DEVICE);
	net_prefetchw(va); /* xdp_frame data area */
	net_prefetch(va + rx_headroom);

	mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
	sinfo = xdp_get_shared_info_from_buff(&xdp);
	truesize = 0;

	cqe_bcnt -= frag_consumed_bytes;
	frag_info++;
	wi++;

	while (cqe_bcnt) {
		skb_frag_t *frag;

		au = wi->au;

		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);

		addr = page_pool_get_dma_addr(au->page);
		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
					frag_consumed_bytes, DMA_FROM_DEVICE);

		if (!xdp_buff_has_frags(&xdp)) {
			/* Init on the first fragment to avoid cold cache access
			 * when possible.
			 */
			sinfo->nr_frags = 0;
			sinfo->xdp_frags_size = 0;
			xdp_buff_set_frags_flag(&xdp);
		}

		frag = &sinfo->frags[sinfo->nr_frags++];
		__skb_frag_set_page(frag, au->page);
		skb_frag_off_set(frag, wi->offset);
		skb_frag_size_set(frag, frag_consumed_bytes);

		if (page_is_pfmemalloc(au->page))
			xdp_buff_set_frag_pfmemalloc(&xdp);

		sinfo->xdp_frags_size += frag_consumed_bytes;
		truesize += frag_info->frag_stride;

		cqe_bcnt -= frag_consumed_bytes;
		frag_info++;
		wi++;
	}

	au = head_wi->au;

	prog = rcu_dereference(rq->xdp_prog);
	if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
			int i;

			for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
				mlx5e_put_rx_frag(rq, &head_wi[i], true);
		}
		return NULL; /* page/packet was consumed by XDP */
	}

	skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
				     xdp.data - xdp.data_hard_start,
				     xdp.data_end - xdp.data,
				     xdp.data - xdp.data_meta);
	if (unlikely(!skb))
		return NULL;

	page_ref_inc(au->page);

	if (unlikely(xdp_buff_has_frags(&xdp))) {
		int i;

		/* sinfo->nr_frags is reset by build_skb, calculate again. */
		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
					   sinfo->xdp_frags_size, truesize,
					   xdp_buff_is_frag_pfmemalloc(&xdp));

		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			page_ref_inc(skb_frag_page(frag));
		}
	}

	return skb;
}

static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
	struct mlx5e_priv *priv = rq->priv;

	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
		mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
		queue_work(priv->wq, &rq->recover_work);
	}
}

static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	trigger_report(rq, cqe);
	rq->stats->wqe_err++;
}

static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto free_wqe;
	}

	skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      mlx5e_xsk_skb_from_cqe_linear,
			      rq, wi, cqe_bcnt);
	if (!skb) {
		/* probably for XDP */
		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
			/* do not return page to cache,
			 * it will be returned on XDP_TX completion.
			 */
			goto wq_cyc_pop;
		}
		goto free_wqe;
	}

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (mlx5e_cqe_regb_chain(cqe))
		if (!mlx5e_tc_update_skb(cqe, skb)) {
			dev_kfree_skb_any(skb);
			goto free_wqe;
		}

	napi_gro_receive(rq->cq.napi, skb);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
	mlx5_wq_cyc_pop(wq);
}

#ifdef CONFIG_MLX5_ESWITCH
static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, wi, cqe_bcnt);
	if (!skb) {
		/* probably for XDP */
		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
			/* do not return page to cache,
			 * it will be returned on XDP_TX completion.
			 */
			goto wq_cyc_pop;
		}
		goto free_wqe;
	}

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (rep->vlan && skb_vlan_tag_present(skb))
		skb_vlan_pop(skb);

	mlx5e_rep_tc_receive(cqe, rq, skb);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
	mlx5_wq_cyc_pop(wq);
}

static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
	u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      rq, wi, cqe_bcnt, head_offset, page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	mlx5e_rep_tc_receive(cqe, rq, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
	.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
};
#endif

static void
mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
		    union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
{
	net_prefetchw(skb->data);

	while (data_bcnt) {
		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
		unsigned int truesize;

		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
			truesize = pg_consumed_bytes;
		else
			truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));

		mlx5e_add_skb_frag(rq, skb, au, data_offset,
				   pg_consumed_bytes, truesize);

		data_bcnt -= pg_consumed_bytes;
		data_offset = 0;
		au++;
	}
}

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
	union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
	u32 frag_offset = head_offset + headlen;
	u32 byte_cnt = cqe_bcnt - headlen;
	union mlx5e_alloc_unit *head_au = au;
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = napi_alloc_skb(rq->cq.napi,
			     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	net_prefetchw(skb->data);

	/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
	if (unlikely(frag_offset >= PAGE_SIZE)) {
		au++;
		frag_offset -= PAGE_SIZE;
	}

	mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
	/* copy header */
	addr = page_pool_get_dma_addr(head_au->page);
	mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
			      head_offset, head_offset, headlen);
	/* skb linear part was allocated with headlen and aligned to long */
	skb->tail += headlen;
	skb->len += headlen;

	return skb;
}

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
	union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
	u16 rx_headroom = rq->buff.headroom;
	struct bpf_prog *prog;
	struct sk_buff *skb;
	u32 metasize = 0;
	void *va, *data;
	dma_addr_t addr;
	u32 frag_size;

	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
		rq->stats->oversize_pkts_sw_drop++;
		return NULL;
	}

	va = page_address(au->page) + head_offset;
	data = va + rx_headroom;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);

	addr = page_pool_get_dma_addr(au->page);
	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
				      frag_size, DMA_FROM_DEVICE);
	net_prefetch(data);

	prog = rcu_dereference(rq->xdp_prog);
	if (prog) {
		struct xdp_buff xdp;

		net_prefetchw(va); /* xdp_frame data area */
		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
		if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
				__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
			return NULL; /* page/packet was consumed by XDP */
		}

		rx_headroom = xdp.data - xdp.data_hard_start;
		metasize = xdp.data - xdp.data_meta;
		cqe_bcnt = xdp.data_end - xdp.data;
	}
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
	if (unlikely(!skb))
		return NULL;

	/* queue up for recycling/reuse */
	page_ref_inc(au->page);

	return skb;
}

static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			  struct mlx5_cqe64 *cqe, u16 header_index)
{
	struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
	u16 head_offset = head->addr & (PAGE_SIZE - 1);
	u16 head_size = cqe->shampo.header_size;
	u16 rx_headroom = rq->buff.headroom;
	struct sk_buff *skb = NULL;
	void *hdr, *data;
	u32 frag_size;

	hdr = page_address(head->page) + head_offset;
	data = hdr + rx_headroom;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);

	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
		/* build SKB around header */
		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
		prefetchw(hdr);
		prefetch(data);
		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);

		if (unlikely(!skb))
			return NULL;

		/* queue up for recycling/reuse */
		page_ref_inc(head->page);

	} else {
		/* allocate SKB and copy header for large header */
		rq->stats->gro_large_hds++;
		skb = napi_alloc_skb(rq->cq.napi,
				     ALIGN(head_size, sizeof(long)));
		if (unlikely(!skb)) {
			rq->stats->buff_alloc_err++;
			return NULL;
		}

		prefetchw(skb->data);
		mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
				      head_offset + rx_headroom,
				      rx_headroom, head_size);
		/* skb linear part was allocated with head_size and aligned to long */
		skb->tail += head_size;
		skb->len += head_size;
	}
	return skb;
}

static void
mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
{
	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
	unsigned int frag_size = skb_frag_size(last_frag);
	unsigned int frag_truesize;

	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
	skb->truesize += frag_truesize - frag_size;
}
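
/* Flush the currently aggregated HW GRO skb to the stack. Before handing
 * it to GRO, pad the last fragment's truesize up to a whole stride so the
 * skb's memory accounting reflects what the RQ actually consumed, e.g. a
 * 1460-byte fragment on a 2KB stride is accounted as 2KB (illustrative
 * numbers). If more than one segment was merged, the headers are fixed up
 * first via mlx5e_shampo_update_hdr().
 */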
static void
mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct mlx5e_rq_stats *stats = rq->stats;

	stats->gro_skbs++;
	if (likely(skb_shinfo(skb)->nr_frags))
		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
	if (NAPI_GRO_CB(skb)->count > 1)
		mlx5e_shampo_update_hdr(rq, cqe, match);
	napi_gro_receive(rq->cq.napi, skb);
	rq->hw_gro_data->skb = NULL;
}

static bool
mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;

	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}

static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	u64 addr = shampo->info[header_index].addr;

	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
		shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
		mlx5e_page_release_dynamic(rq, shampo->info[header_index].page, true);
	}
	bitmap_clear(shampo->bitmap, header_index, 1);
}

static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
	u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	u16 head_size = cqe->shampo.header_size;
	struct sk_buff **skb = &rq->hw_gro_data->skb;
	bool flush = cqe->shampo.flush;
	bool match = cqe->shampo.match;
	struct mlx5e_rq_stats *stats = rq->stats;
	struct mlx5e_rx_wqe_ll *wqe;
	union mlx5e_alloc_unit *au;
	struct mlx5e_mpw_info *wi;
	struct mlx5_wq_ll *wq;

	wi = mlx5e_get_mpw_info(rq, wqe_id);
	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	stats->gro_match_packets += match;

	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
		match = false;
		mlx5e_shampo_flush_skb(rq, cqe, match);
	}

	if (!*skb) {
		if (likely(head_size))
			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
		else
			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
								  page_idx);
		if (unlikely(!*skb))
			goto free_hd_entry;

		NAPI_GRO_CB(*skb)->count = 1;
		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
	} else {
		NAPI_GRO_CB(*skb)->count++;
		if (NAPI_GRO_CB(*skb)->count == 2 &&
		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
				    sizeof(struct iphdr);
			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);

			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
		}
	}

	if (likely(head_size)) {
		au = &wi->alloc_units[page_idx];
		mlx5e_fill_skb_data(*skb, rq, au, data_bcnt, data_offset);
	}
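
	/* All data for this CQE is now attached to the aggregate skb; fill in
	 * checksum/hash/timestamp, then push the skb to GRO if the HW marked
	 * this CQE as the end of the session (flush).
	 */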
	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
	if (flush)
		mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
	mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
	u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
			      rq, wi, cqe_bcnt, head_offset, page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (mlx5e_cqe_regb_chain(cqe))
		if (!mlx5e_tc_update_skb(cqe, skb)) {
			dev_kfree_skb_any(skb);
			goto mpwrq_cqe_out;
		}

	napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return 0;

	if (rq->cqd.left) {
		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
		if (work_done >= budget)
			goto out;
	}

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe) {
		if (unlikely(work_done))
			goto out;
		return 0;
	}

	do {
		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			work_done +=
				mlx5e_decompress_cqes_start(rq, cqwq,
							    budget - work_done);
			continue;
		}

		mlx5_cqwq_pop(cqwq);

		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
				rq, cqe);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

out:
	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
		mlx5e_shampo_flush_skb(rq, NULL, false);
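
	/* If XDP is active, complete any XDP_TX/redirect work accumulated in
	 * this poll cycle; doorbells and flushes are deferred to this point
	 * so their cost is amortized over the whole NAPI budget.
	 */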
	if (rcu_access_pointer(rq->xdp_prog))
		mlx5e_xdp_rx_poll_complete(rq);

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}

#ifdef CONFIG_MLX5_CORE_IPOIB

#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE 16

static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct hwtstamp_config *tstamp;
	struct mlx5e_rq_stats *stats;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	char *pseudo_header;
	u32 flags_rqpn;
	u32 qpn;
	u8 *dgid;
	u8 g;

	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);

	/* No mapping present, cannot process SKB. This might happen if a child
	 * interface is going down while still having unprocessed CQEs on the
	 * parent RQ.
	 */
	if (unlikely(!netdev)) {
		/* TODO: add drop counters support */
		skb->dev = NULL;
		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
		return;
	}

	priv = mlx5i_epriv(netdev);
	tstamp = &priv->tstamp;
	stats = rq->stats;

	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
	g = (flags_rqpn >> 28) & 3;
	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
	if ((!g) || dgid[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	/* Drop packets that this interface sent itself, i.e. multicast
	 * packets that the HCA has replicated.
	 */
	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
		    MLX5_GID_SIZE) == 0)) {
		skb->dev = NULL;
		return;
	}

	skb_pull(skb, MLX5_IB_GRH_BYTES);

	skb->protocol = *((__be16 *)(skb->data));

	if (netdev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		stats->csum_complete++;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		stats->csum_none++;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
								  rq->clock, get_cqe_ts(cqe));
	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	/* 20 bytes of IPoIB pseudo header, plus the 4-byte encap header
	 * already present in the buffer, make up the hard header.
	 */
	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
	skb_reset_mac_header(skb);
	skb_pull(skb, MLX5_IPOIB_HARD_LEN);

	skb->dev = netdev;

	stats->packets++;
	stats->bytes += cqe_bcnt;
}
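
/* IPoIB datapath CQE handler. Mirrors mlx5e_handle_rx_cqe() for the cyclic
 * RQ, but completes skbs via mlx5i_complete_rx_cqe() above, which may clear
 * skb->dev to request a drop (no child netdev mapping, or a self-sent
 * multicast copy replicated by the HCA); such skbs are freed instead of
 * being passed to GRO.
 */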
static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, wi, cqe_bcnt);
	if (!skb)
		goto wq_free_wqe;

	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	if (unlikely(!skb->dev)) {
		dev_kfree_skb_any(skb);
		goto wq_free_wqe;
	}
	napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
	mlx5_wq_cyc_pop(wq);
}

const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
};
#endif /* CONFIG_MLX5_CORE_IPOIB */

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5_core_dev *mdev = rq->mdev;
	struct mlx5e_priv *priv = rq->priv;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
				return -EINVAL;
			}
		} else {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
				return -EINVAL;
			}
		}

		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			netdev_err(netdev, "RX handler of RQ is not set\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5e_priv *priv = netdev_priv(rq->netdev);
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct devlink_port *dl_port;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 trap_id;
	u16 ci;

	trap_id = get_cqe_flow_tag(cqe);
	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto free_wqe;
	}

	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
	if (!skb)
		goto free_wqe;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	skb_push(skb, ETH_HLEN);

	dl_port = mlx5e_devlink_get_dl_port(priv);
	mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
	dev_kfree_skb_any(skb);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, false);
	mlx5_wq_cyc_pop(wq);
}
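
/* Wire up a trap RQ. It reuses the regular cyclic-RQ posting and dealloc
 * helpers, but completions go to mlx5e_trap_handle_rx_cqe(), which reports
 * the packet to devlink and frees it rather than delivering it to the
 * stack.
 */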
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
			       mlx5e_skb_from_cqe_linear :
			       mlx5e_skb_from_cqe_nonlinear;
	rq->post_wqes = mlx5e_post_rx_wqes;
	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
}