/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/bitmap.h>
#include <linux/filter.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
#include "devlink.h"
#include "en/devlink.h"

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
	.handle_rx_cqe = mlx5e_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
				       u32 cqcc, void *data)
{
	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);

	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}

static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
					   struct mlx5_cqe64 *cqe)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	struct mlx5_cqe64 *title = &cqd->title;

	memcpy(title, cqe, sizeof(struct mlx5_cqe64));

	if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
		return;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
			mpwrq_get_cqe_consumed_strides(title);
	else
		cqd->wqe_counter =
			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
					 struct mlx5_cqwq *wq,
					 u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	struct mlx5_cqe64 *title = &cqd->title;

	mlx5e_read_cqe_slot(wq, cqcc, title);
	cqd->left = be32_to_cpu(title->byte_cnt);
	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
	rq->stats->cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
					    struct mlx5e_cq_decomp *cqd,
					    u32 cqcc)
{
	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
	cqd->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
	u32 cqcc = wq->cc;
	u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
	u32 wq_sz = mlx5_cqwq_get_size(wq);
	u32 ci_top = min_t(u32, wq_sz, ci + n);

	for (; ci < ci_top; ci++, n--) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

		cqe->op_own = op_own;
	}

	if (unlikely(ci == wq_sz)) {
		op_own = !op_own;
		for (ci = 0; ci < n; ci++) {
			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

			cqe->op_own = op_own;
		}
	}
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
					struct mlx5_cqwq *wq,
					u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
	struct mlx5_cqe64 *title = &cqd->title;

	title->byte_cnt = mini_cqe->byte_cnt;
	title->check_sum = mini_cqe->checksum;
	title->op_own &= 0xf0;
	title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);

	/* state bit set implies linked-list striding RQ wq type and
	 * HW stride index capability supported
	 */
	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
		title->wqe_counter = mini_cqe->stridx;
		return;
	}

	/* HW stride index capability not supported */
	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
	else
		cqd->wqe_counter =
			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
						struct mlx5_cqwq *wq,
						u32 cqcc)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;

	mlx5e_decompress_cqe(rq, wq, cqcc);
	cqd->title.rss_hash_type = 0;
	cqd->title.rss_hash_result = 0;
}

static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqwq *wq,
					 struct mlx5_cqe64 *cqe,
					 int budget_rem)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	u32 cqcc, left;
	u32 i;

	left = get_cqe_enhanced_num_mini_cqes(cqe);
	/* Here we avoid breaking the cqe compression session in the middle
	 * in case budget is not sufficient to handle all of it. In this case
	 * we return work_done == budget_rem to give 'busy' napi indication.
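	 * The untouched session is then handled from the beginning on a later
	 * NAPI poll, once enough budget is available.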
	 */
	if (unlikely(left > budget_rem))
		return budget_rem;

	cqcc = wq->cc;
	cqd->mini_arr_idx = 0;
	memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
	for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
				rq, &cqd->title);
	}
	wq->cc = cqcc;
	rq->stats->cqe_compress_pkts += left;

	return left;
}

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
					     struct mlx5_cqwq *wq,
					     int update_owner_only,
					     int budget_rem)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	u32 cqcc = wq->cc + update_owner_only;
	u32 cqe_count;
	u32 i;

	cqe_count = min_t(u32, cqd->left, budget_rem);

	for (i = update_owner_only; i < cqe_count;
	     i++, cqd->mini_arr_idx++, cqcc++) {
		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);

		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
				rq, &cqd->title);
	}
	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
	wq->cc = cqcc;
	cqd->left -= cqe_count;
	rq->stats->cqe_compress_pkts += cqe_count;

	return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
					      struct mlx5_cqwq *wq,
					      int budget_rem)
{
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	u32 cc = wq->cc;

	mlx5e_read_title_slot(rq, wq, cc);
	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
	mlx5e_decompress_cqe(rq, wq, cc);
	INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
			mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
			rq, &cqd->title);
	cqd->mini_arr_idx++;

	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
}

static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;
	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
	struct mlx5e_rq_stats *stats = rq->stats;

	if (tail_next == cache->head) {
		stats->cache_full++;
		return false;
	}

	if (!dev_page_is_reusable(page)) {
		stats->cache_waive++;
		return false;
	}

	cache->page_cache[cache->tail] = page;
	cache->tail = tail_next;
	return true;
}

static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;
	struct mlx5e_rq_stats *stats = rq->stats;
	dma_addr_t addr;

	if (unlikely(cache->head == cache->tail)) {
		stats->cache_empty++;
		return false;
	}

	if (page_ref_count(cache->page_cache[cache->head]) != 1) {
		stats->cache_busy++;
		return false;
	}

	au->page = cache->page_cache[cache->head];
	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
	stats->cache_reuse++;

	addr = page_pool_get_dma_addr(au->page);
	/* Non-XSK always uses PAGE_SIZE. */
	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
	return true;
}

static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
	dma_addr_t addr;

	if (mlx5e_rx_cache_get(rq, au))
		return 0;

	au->page = page_pool_dev_alloc_pages(rq->page_pool);
	if (unlikely(!au->page))
		return -ENOMEM;

	/* Non-XSK always uses PAGE_SIZE. */
	addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);
	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
		page_pool_recycle_direct(rq->page_pool, au->page);
		au->page = NULL;
		return -ENOMEM;
	}
	page_pool_set_dma_addr(au->page, addr);

	return 0;
}

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
}

void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
{
	if (likely(recycle)) {
		if (mlx5e_rx_cache_put(rq, page))
			return;

		mlx5e_page_dma_unmap(rq, page);
		page_pool_recycle_direct(rq->page_pool, page);
	} else {
		mlx5e_page_dma_unmap(rq, page);
		page_pool_release_page(rq->page_pool, page);
		put_page(page);
	}
}

static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
				    struct mlx5e_wqe_frag_info *frag)
{
	int err = 0;

	if (!frag->offset)
		/* On first frag (offset == 0), replenish page (alloc_unit actually).
		 * Other frags that point to the same alloc_unit (with a different
		 * offset) should just use the new one without replenishing again
		 * by themselves.
		 */
		err = mlx5e_page_alloc_pool(rq, frag->au);

	return err;
}

static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
				     struct mlx5e_wqe_frag_info *frag,
				     bool recycle)
{
	if (frag->last_in_page)
		mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
}

static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
{
	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
}

static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
			      u16 ix)
{
	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
	int err;
	int i;

	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
		dma_addr_t addr;
		u16 headroom;

		err = mlx5e_get_rx_frag(rq, frag);
		if (unlikely(err))
			goto free_frags;

		headroom = i == 0 ? rq->buff.headroom : 0;
		addr = page_pool_get_dma_addr(frag->au->page);
		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
	}

	return 0;

free_frags:
	while (--i >= 0)
		mlx5e_put_rx_frag(rq, --frag, true);

	return err;
}

static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
				     struct mlx5e_wqe_frag_info *wi,
				     bool recycle)
{
	int i;

	if (rq->xsk_pool) {
		/* The `recycle` parameter is ignored, and the page is always
		 * put into the Reuse Ring, because there is no way to return
		 * the page to the userspace when the interface goes down.
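		 * (xsk_buff_free() puts the frame back on the pool's free list.)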
		 */
		xsk_buff_free(wi->au->xsk);
		return;
	}

	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
		mlx5e_put_rx_frag(rq, wi, recycle);
}

static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

	mlx5e_free_rx_wqe(rq, wi, false);
}

static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	int i;

	for (i = 0; i < wqe_bulk; i++) {
		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
		struct mlx5e_rx_wqe_cyc *wqe;

		wqe = mlx5_wq_cyc_get_wqe(wq, j);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
			break;
	}

	return i;
}

static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
		   union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
		   unsigned int truesize)
{
	dma_addr_t addr = page_pool_get_dma_addr(au->page);

	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
				rq->buff.map_dir);
	page_ref_inc(au->page);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			au->page, frag_offset, len, truesize);
}

static inline void
mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
		      struct page *page, dma_addr_t addr,
		      int offset_from, int dma_offset, u32 headlen)
{
	const void *from = page_address(page) + offset_from;
	/* Aligning len to sizeof(long) optimizes memcpy performance */
	unsigned int len = ALIGN(headlen, sizeof(long));

	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
				rq->buff.map_dir);
	skb_copy_to_linear_data(skb, from, len);
}

static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
	union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
	bool no_xdp_xmit;
	int i;

	/* A common case for AF_XDP. */
	if (bitmap_full(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe))
		return;

	no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);

	if (rq->xsk_pool) {
		/* The `recycle` parameter is ignored, and the page is always
		 * put into the Reuse Ring, because there is no way to return
		 * the page to the userspace when the interface goes down.
		 */
		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
				xsk_buff_free(alloc_units[i].xsk);
	} else {
		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
				mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
	}
}

static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
{
	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

	do {
		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);

		mlx5_wq_ll_push(wq, next_wqe_index);
	} while (--n);

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);
}

/* This function returns the size of the continuous free space inside a bitmap
 * that starts from first and no longer than len including circular ones.
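 * In other words: the number of consecutive clear bits starting at @first,
 * wrapping around the end of the bitmap if needed, capped at @len.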
547 */ 548 static int bitmap_find_window(unsigned long *bitmap, int len, 549 int bitmap_size, int first) 550 { 551 int next_one, count; 552 553 next_one = find_next_bit(bitmap, bitmap_size, first); 554 if (next_one == bitmap_size) { 555 if (bitmap_size - first >= len) 556 return len; 557 next_one = find_next_bit(bitmap, bitmap_size, 0); 558 count = next_one + bitmap_size - first; 559 } else { 560 count = next_one - first; 561 } 562 563 return min(len, count); 564 } 565 566 static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, 567 __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs) 568 { 569 memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms)); 570 umr_wqe->ctrl.opmod_idx_opcode = 571 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | 572 MLX5_OPCODE_UMR); 573 umr_wqe->ctrl.umr_mkey = key; 574 umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) 575 | MLX5E_KLM_UMR_DS_CNT(klm_len)); 576 umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; 577 umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); 578 umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len); 579 umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); 580 } 581 582 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, 583 struct mlx5e_icosq *sq, 584 u16 klm_entries, u16 index) 585 { 586 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 587 u16 entries, pi, header_offset, err, wqe_bbs, new_entries; 588 u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; 589 struct page *page = shampo->last_page; 590 u64 addr = shampo->last_addr; 591 struct mlx5e_dma_info *dma_info; 592 struct mlx5e_umr_wqe *umr_wqe; 593 int headroom, i; 594 595 headroom = rq->buff.headroom; 596 new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); 597 entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); 598 wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries); 599 pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); 600 umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); 601 build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs); 602 603 for (i = 0; i < entries; i++, index++) { 604 dma_info = &shampo->info[index]; 605 if (i >= klm_entries || (index < shampo->pi && shampo->pi - index < 606 MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)) 607 goto update_klm; 608 header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << 609 MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; 610 if (!(header_offset & (PAGE_SIZE - 1))) { 611 union mlx5e_alloc_unit au; 612 613 err = mlx5e_page_alloc_pool(rq, &au); 614 if (unlikely(err)) 615 goto err_unmap; 616 page = dma_info->page = au.page; 617 addr = dma_info->addr = page_pool_get_dma_addr(au.page); 618 } else { 619 dma_info->addr = addr + header_offset; 620 dma_info->page = page; 621 } 622 623 update_klm: 624 umr_wqe->inline_klms[i].bcount = 625 cpu_to_be32(MLX5E_RX_MAX_HEAD); 626 umr_wqe->inline_klms[i].key = cpu_to_be32(lkey); 627 umr_wqe->inline_klms[i].va = 628 cpu_to_be64(dma_info->addr + headroom); 629 } 630 631 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { 632 .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, 633 .num_wqebbs = wqe_bbs, 634 .shampo.len = new_entries, 635 }; 636 637 shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); 638 shampo->last_page = page; 639 shampo->last_addr = addr; 640 sq->pc += wqe_bbs; 641 sq->doorbell_cseg = &umr_wqe->ctrl; 642 643 return 0; 644 645 err_unmap: 646 while (--i >= 0) { 647 dma_info = &shampo->info[--index]; 648 if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { 649 dma_info->addr = 
ALIGN_DOWN(dma_info->addr, PAGE_SIZE); 650 mlx5e_page_release_dynamic(rq, dma_info->page, true); 651 } 652 } 653 rq->stats->buff_alloc_err++; 654 return err; 655 } 656 657 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) 658 { 659 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 660 u16 klm_entries, num_wqe, index, entries_before; 661 struct mlx5e_icosq *sq = rq->icosq; 662 int i, err, max_klm_entries, len; 663 664 max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); 665 klm_entries = bitmap_find_window(shampo->bitmap, 666 shampo->hd_per_wqe, 667 shampo->hd_per_wq, shampo->pi); 668 if (!klm_entries) 669 return 0; 670 671 klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); 672 index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); 673 entries_before = shampo->hd_per_wq - index; 674 675 if (unlikely(entries_before < klm_entries)) 676 num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) + 677 DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries); 678 else 679 num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries); 680 681 for (i = 0; i < num_wqe; i++) { 682 len = (klm_entries > max_klm_entries) ? max_klm_entries : 683 klm_entries; 684 if (unlikely(index + len > shampo->hd_per_wq)) 685 len = shampo->hd_per_wq - index; 686 err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); 687 if (unlikely(err)) 688 return err; 689 index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); 690 klm_entries -= len; 691 } 692 693 return 0; 694 } 695 696 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) 697 { 698 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); 699 union mlx5e_alloc_unit *au = &wi->alloc_units[0]; 700 struct mlx5e_icosq *sq = rq->icosq; 701 struct mlx5_wq_cyc *wq = &sq->wq; 702 struct mlx5e_umr_wqe *umr_wqe; 703 u32 offset; /* 17-bit value with MTT. */ 704 u16 pi; 705 int err; 706 int i; 707 708 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { 709 err = mlx5e_alloc_rx_hd_mpwqe(rq); 710 if (unlikely(err)) 711 goto err; 712 } 713 714 pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs); 715 umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); 716 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); 717 718 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) { 719 dma_addr_t addr; 720 721 err = mlx5e_page_alloc_pool(rq, au); 722 if (unlikely(err)) 723 goto err_unmap; 724 addr = page_pool_get_dma_addr(au->page); 725 umr_wqe->inline_mtts[i] = (struct mlx5_mtt) { 726 .ptag = cpu_to_be64(addr | MLX5_EN_WR), 727 }; 728 } 729 730 /* Pad if needed, in case the value set to ucseg->xlt_octowords 731 * in mlx5e_build_umr_wqe() needed alignment. 
732 */ 733 if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) { 734 int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) - 735 rq->mpwqe.pages_per_wqe; 736 737 memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0, 738 sizeof(*umr_wqe->inline_mtts) * pad); 739 } 740 741 bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe); 742 wi->consumed_strides = 0; 743 744 umr_wqe->ctrl.opmod_idx_opcode = 745 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | 746 MLX5_OPCODE_UMR); 747 748 offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD; 749 umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); 750 751 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { 752 .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, 753 .num_wqebbs = rq->mpwqe.umr_wqebbs, 754 .umr.rq = rq, 755 }; 756 757 sq->pc += rq->mpwqe.umr_wqebbs; 758 759 sq->doorbell_cseg = &umr_wqe->ctrl; 760 761 return 0; 762 763 err_unmap: 764 while (--i >= 0) { 765 au--; 766 mlx5e_page_release_dynamic(rq, au->page, true); 767 } 768 769 err: 770 rq->stats->buff_alloc_err++; 771 772 return err; 773 } 774 775 /* This function is responsible to dealloc SHAMPO header buffer. 776 * close == true specifies that we are in the middle of closing RQ operation so 777 * we go over all the entries and if they are not in use we free them, 778 * otherwise we only go over a specific range inside the header buffer that are 779 * not in use. 780 */ 781 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close) 782 { 783 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 784 int hd_per_wq = shampo->hd_per_wq; 785 struct page *deleted_page = NULL; 786 struct mlx5e_dma_info *hd_info; 787 int i, index = start; 788 789 for (i = 0; i < len; i++, index++) { 790 if (index == hd_per_wq) 791 index = 0; 792 793 if (close && !test_bit(index, shampo->bitmap)) 794 continue; 795 796 hd_info = &shampo->info[index]; 797 hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE); 798 if (hd_info->page != deleted_page) { 799 deleted_page = hd_info->page; 800 mlx5e_page_release_dynamic(rq, hd_info->page, false); 801 } 802 } 803 804 if (start + len > hd_per_wq) { 805 len -= hd_per_wq - start; 806 bitmap_clear(shampo->bitmap, start, hd_per_wq - start); 807 start = 0; 808 } 809 810 bitmap_clear(shampo->bitmap, start, len); 811 } 812 813 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) 814 { 815 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); 816 /* Don't recycle, this function is called on rq/netdev close */ 817 mlx5e_free_rx_mpwqe(rq, wi, false); 818 } 819 820 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) 821 { 822 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 823 int wqe_bulk, count; 824 bool busy = false; 825 u16 head; 826 827 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) 828 return false; 829 830 if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk) 831 return false; 832 833 if (rq->page_pool) 834 page_pool_nid_changed(rq->page_pool, numa_mem_id()); 835 836 wqe_bulk = mlx5_wq_cyc_missing(wq); 837 head = mlx5_wq_cyc_get_head(wq); 838 839 /* Don't allow any newly allocated WQEs to share the same page with old 840 * WQEs that aren't completed yet. Stop earlier. 
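	 * (wqe_index_mask covers the WQEs that may share a page, so the bulk is
	 * trimmed back to that boundary.)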
841 */ 842 wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask; 843 844 if (!rq->xsk_pool) 845 count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); 846 else if (likely(!rq->xsk_pool->dma_need_sync)) 847 count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk); 848 else 849 /* If dma_need_sync is true, it's more efficient to call 850 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch, 851 * because the latter does the same check and returns only one 852 * frame. 853 */ 854 count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk); 855 856 mlx5_wq_cyc_push_n(wq, count); 857 if (unlikely(count != wqe_bulk)) { 858 rq->stats->buff_alloc_err++; 859 busy = true; 860 } 861 862 /* ensure wqes are visible to device before updating doorbell record */ 863 dma_wmb(); 864 865 mlx5_wq_cyc_update_db_record(wq); 866 867 return busy; 868 } 869 870 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq) 871 { 872 u16 sqcc; 873 874 sqcc = sq->cc; 875 876 while (sqcc != sq->pc) { 877 struct mlx5e_icosq_wqe_info *wi; 878 u16 ci; 879 880 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 881 wi = &sq->db.wqe_info[ci]; 882 sqcc += wi->num_wqebbs; 883 #ifdef CONFIG_MLX5_EN_TLS 884 switch (wi->wqe_type) { 885 case MLX5E_ICOSQ_WQE_SET_PSV_TLS: 886 mlx5e_ktls_handle_ctx_completion(wi); 887 break; 888 case MLX5E_ICOSQ_WQE_GET_PSV_TLS: 889 mlx5e_ktls_handle_get_psv_completion(wi, sq); 890 break; 891 } 892 #endif 893 } 894 sq->cc = sqcc; 895 } 896 897 static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr, 898 struct mlx5e_icosq *sq) 899 { 900 struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq); 901 struct mlx5e_shampo_hd *shampo; 902 /* assume 1:1 relationship between RQ and icosq */ 903 struct mlx5e_rq *rq = &c->rq; 904 int end, from, len = umr.len; 905 906 shampo = rq->mpwqe.shampo; 907 end = shampo->hd_per_wq; 908 from = shampo->ci; 909 if (from + len > shampo->hd_per_wq) { 910 len -= end - from; 911 bitmap_set(shampo->bitmap, from, end - from); 912 from = 0; 913 } 914 915 bitmap_set(shampo->bitmap, from, len); 916 shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1); 917 } 918 919 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) 920 { 921 struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); 922 struct mlx5_cqe64 *cqe; 923 u16 sqcc; 924 int i; 925 926 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) 927 return 0; 928 929 cqe = mlx5_cqwq_get_cqe(&cq->wq); 930 if (likely(!cqe)) 931 return 0; 932 933 /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), 934 * otherwise a cq overrun may occur 935 */ 936 sqcc = sq->cc; 937 938 i = 0; 939 do { 940 u16 wqe_counter; 941 bool last_wqe; 942 943 mlx5_cqwq_pop(&cq->wq); 944 945 wqe_counter = be16_to_cpu(cqe->wqe_counter); 946 947 do { 948 struct mlx5e_icosq_wqe_info *wi; 949 u16 ci; 950 951 last_wqe = (sqcc == wqe_counter); 952 953 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 954 wi = &sq->db.wqe_info[ci]; 955 sqcc += wi->num_wqebbs; 956 957 if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { 958 netdev_WARN_ONCE(cq->netdev, 959 "Bad OP in ICOSQ CQE: 0x%x\n", 960 get_cqe_opcode(cqe)); 961 mlx5e_dump_error_cqe(&sq->cq, sq->sqn, 962 (struct mlx5_err_cqe *)cqe); 963 mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); 964 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 965 queue_work(cq->priv->wq, &sq->recover_work); 966 break; 967 } 968 969 switch (wi->wqe_type) { 970 case MLX5E_ICOSQ_WQE_UMR_RX: 971 wi->umr.rq->mpwqe.umr_completed++; 972 break; 973 case MLX5E_ICOSQ_WQE_NOP: 974 break; 975 
case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR: 976 mlx5e_handle_shampo_hd_umr(wi->shampo, sq); 977 break; 978 #ifdef CONFIG_MLX5_EN_TLS 979 case MLX5E_ICOSQ_WQE_UMR_TLS: 980 break; 981 case MLX5E_ICOSQ_WQE_SET_PSV_TLS: 982 mlx5e_ktls_handle_ctx_completion(wi); 983 break; 984 case MLX5E_ICOSQ_WQE_GET_PSV_TLS: 985 mlx5e_ktls_handle_get_psv_completion(wi, sq); 986 break; 987 #endif 988 default: 989 netdev_WARN_ONCE(cq->netdev, 990 "Bad WQE type in ICOSQ WQE info: 0x%x\n", 991 wi->wqe_type); 992 } 993 } while (!last_wqe); 994 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 995 996 sq->cc = sqcc; 997 998 mlx5_cqwq_update_db_record(&cq->wq); 999 1000 return i; 1001 } 1002 1003 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) 1004 { 1005 struct mlx5_wq_ll *wq = &rq->mpwqe.wq; 1006 u8 umr_completed = rq->mpwqe.umr_completed; 1007 struct mlx5e_icosq *sq = rq->icosq; 1008 int alloc_err = 0; 1009 u8 missing, i; 1010 u16 head; 1011 1012 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) 1013 return false; 1014 1015 if (umr_completed) { 1016 mlx5e_post_rx_mpwqe(rq, umr_completed); 1017 rq->mpwqe.umr_in_progress -= umr_completed; 1018 rq->mpwqe.umr_completed = 0; 1019 } 1020 1021 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress; 1022 1023 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk)) 1024 rq->stats->congst_umr++; 1025 1026 if (likely(missing < rq->mpwqe.min_wqe_bulk)) 1027 return false; 1028 1029 if (rq->page_pool) 1030 page_pool_nid_changed(rq->page_pool, numa_mem_id()); 1031 1032 head = rq->mpwqe.actual_wq_head; 1033 i = missing; 1034 do { 1035 alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) : 1036 mlx5e_alloc_rx_mpwqe(rq, head); 1037 1038 if (unlikely(alloc_err)) 1039 break; 1040 head = mlx5_wq_ll_get_wqe_next_ix(wq, head); 1041 } while (--i); 1042 1043 rq->mpwqe.umr_last_bulk = missing - i; 1044 if (sq->doorbell_cseg) { 1045 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); 1046 sq->doorbell_cseg = NULL; 1047 } 1048 1049 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; 1050 rq->mpwqe.actual_wq_head = head; 1051 1052 /* If XSK Fill Ring doesn't have enough frames, report the error, so 1053 * that one of the actions can be performed: 1054 * 1. If need_wakeup is used, signal that the application has to kick 1055 * the driver when it refills the Fill Ring. 1056 * 2. Otherwise, busy poll by rescheduling the NAPI poll. 
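	 * Returning true here reports 'busy', so NAPI keeps polling.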
1057 */ 1058 if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool)) 1059 return true; 1060 1061 return false; 1062 } 1063 1064 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) 1065 { 1066 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); 1067 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || 1068 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); 1069 1070 tcp->check = 0; 1071 tcp->psh = get_cqe_lro_tcppsh(cqe); 1072 1073 if (tcp_ack) { 1074 tcp->ack = 1; 1075 tcp->ack_seq = cqe->lro.ack_seq_num; 1076 tcp->window = cqe->lro.tcp_win; 1077 } 1078 } 1079 1080 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, 1081 u32 cqe_bcnt) 1082 { 1083 struct ethhdr *eth = (struct ethhdr *)(skb->data); 1084 struct tcphdr *tcp; 1085 int network_depth = 0; 1086 __wsum check; 1087 __be16 proto; 1088 u16 tot_len; 1089 void *ip_p; 1090 1091 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); 1092 1093 tot_len = cqe_bcnt - network_depth; 1094 ip_p = skb->data + network_depth; 1095 1096 if (proto == htons(ETH_P_IP)) { 1097 struct iphdr *ipv4 = ip_p; 1098 1099 tcp = ip_p + sizeof(struct iphdr); 1100 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 1101 1102 ipv4->ttl = cqe->lro.min_ttl; 1103 ipv4->tot_len = cpu_to_be16(tot_len); 1104 ipv4->check = 0; 1105 ipv4->check = ip_fast_csum((unsigned char *)ipv4, 1106 ipv4->ihl); 1107 1108 mlx5e_lro_update_tcp_hdr(cqe, tcp); 1109 check = csum_partial(tcp, tcp->doff * 4, 1110 csum_unfold((__force __sum16)cqe->check_sum)); 1111 /* Almost done, don't forget the pseudo header */ 1112 tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, 1113 tot_len - sizeof(struct iphdr), 1114 IPPROTO_TCP, check); 1115 } else { 1116 u16 payload_len = tot_len - sizeof(struct ipv6hdr); 1117 struct ipv6hdr *ipv6 = ip_p; 1118 1119 tcp = ip_p + sizeof(struct ipv6hdr); 1120 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1121 1122 ipv6->hop_limit = cqe->lro.min_ttl; 1123 ipv6->payload_len = cpu_to_be16(payload_len); 1124 1125 mlx5e_lro_update_tcp_hdr(cqe, tcp); 1126 check = csum_partial(tcp, tcp->doff * 4, 1127 csum_unfold((__force __sum16)cqe->check_sum)); 1128 /* Almost done, don't forget the pseudo header */ 1129 tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, 1130 IPPROTO_TCP, check); 1131 } 1132 } 1133 1134 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) 1135 { 1136 struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; 1137 u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; 1138 1139 return page_address(last_head->page) + head_offset; 1140 } 1141 1142 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) 1143 { 1144 int udp_off = rq->hw_gro_data->fk.control.thoff; 1145 struct sk_buff *skb = rq->hw_gro_data->skb; 1146 struct udphdr *uh; 1147 1148 uh = (struct udphdr *)(skb->data + udp_off); 1149 uh->len = htons(skb->len - udp_off); 1150 1151 if (uh->check) 1152 uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr, 1153 ipv4->daddr, 0); 1154 1155 skb->csum_start = (unsigned char *)uh - skb->head; 1156 skb->csum_offset = offsetof(struct udphdr, check); 1157 1158 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; 1159 } 1160 1161 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6) 1162 { 1163 int udp_off = rq->hw_gro_data->fk.control.thoff; 1164 struct sk_buff *skb = rq->hw_gro_data->skb; 1165 struct udphdr *uh; 1166 1167 uh = (struct udphdr *)(skb->data + udp_off); 
1168 uh->len = htons(skb->len - udp_off); 1169 1170 if (uh->check) 1171 uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr, 1172 &ipv6->daddr, 0); 1173 1174 skb->csum_start = (unsigned char *)uh - skb->head; 1175 skb->csum_offset = offsetof(struct udphdr, check); 1176 1177 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; 1178 } 1179 1180 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 1181 struct tcphdr *skb_tcp_hd) 1182 { 1183 u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); 1184 struct tcphdr *last_tcp_hd; 1185 void *last_hd_addr; 1186 1187 last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); 1188 last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff; 1189 tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH); 1190 } 1191 1192 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4, 1193 struct mlx5_cqe64 *cqe, bool match) 1194 { 1195 int tcp_off = rq->hw_gro_data->fk.control.thoff; 1196 struct sk_buff *skb = rq->hw_gro_data->skb; 1197 struct tcphdr *tcp; 1198 1199 tcp = (struct tcphdr *)(skb->data + tcp_off); 1200 if (match) 1201 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); 1202 1203 tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr, 1204 ipv4->daddr, 0); 1205 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 1206 if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) 1207 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 1208 1209 skb->csum_start = (unsigned char *)tcp - skb->head; 1210 skb->csum_offset = offsetof(struct tcphdr, check); 1211 1212 if (tcp->cwr) 1213 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 1214 } 1215 1216 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6, 1217 struct mlx5_cqe64 *cqe, bool match) 1218 { 1219 int tcp_off = rq->hw_gro_data->fk.control.thoff; 1220 struct sk_buff *skb = rq->hw_gro_data->skb; 1221 struct tcphdr *tcp; 1222 1223 tcp = (struct tcphdr *)(skb->data + tcp_off); 1224 if (match) 1225 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); 1226 1227 tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr, 1228 &ipv6->daddr, 0); 1229 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 1230 skb->csum_start = (unsigned char *)tcp - skb->head; 1231 skb->csum_offset = offsetof(struct tcphdr, check); 1232 1233 if (tcp->cwr) 1234 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 1235 } 1236 1237 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) 1238 { 1239 bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)); 1240 struct sk_buff *skb = rq->hw_gro_data->skb; 1241 1242 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 1243 skb->ip_summed = CHECKSUM_PARTIAL; 1244 1245 if (is_ipv4) { 1246 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr); 1247 struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff); 1248 __be16 newlen = htons(skb->len - nhoff); 1249 1250 csum_replace2(&ipv4->check, ipv4->tot_len, newlen); 1251 ipv4->tot_len = newlen; 1252 1253 if (ipv4->protocol == IPPROTO_TCP) 1254 mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match); 1255 else 1256 mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4); 1257 } else { 1258 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr); 1259 struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff); 1260 1261 ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6)); 1262 1263 if (ipv6->nexthdr == IPPROTO_TCP) 1264 
mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match); 1265 else 1266 mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6); 1267 } 1268 } 1269 1270 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, 1271 struct sk_buff *skb) 1272 { 1273 u8 cht = cqe->rss_hash_type; 1274 int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 : 1275 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 : 1276 PKT_HASH_TYPE_NONE; 1277 skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); 1278 } 1279 1280 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, 1281 __be16 *proto) 1282 { 1283 *proto = ((struct ethhdr *)skb->data)->h_proto; 1284 *proto = __vlan_get_protocol(skb, *proto, network_depth); 1285 1286 if (*proto == htons(ETH_P_IP)) 1287 return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); 1288 1289 if (*proto == htons(ETH_P_IPV6)) 1290 return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); 1291 1292 return false; 1293 } 1294 1295 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) 1296 { 1297 int network_depth = 0; 1298 __be16 proto; 1299 void *ip; 1300 int rc; 1301 1302 if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto))) 1303 return; 1304 1305 ip = skb->data + network_depth; 1306 rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) : 1307 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip)); 1308 1309 rq->stats->ecn_mark += !!rc; 1310 } 1311 1312 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) 1313 { 1314 void *ip_p = skb->data + network_depth; 1315 1316 return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol : 1317 ((struct ipv6hdr *)ip_p)->nexthdr; 1318 } 1319 1320 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) 1321 1322 #define MAX_PADDING 8 1323 1324 static void 1325 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, 1326 struct mlx5e_rq_stats *stats) 1327 { 1328 stats->csum_complete_tail_slow++; 1329 skb->csum = csum_block_add(skb->csum, 1330 skb_checksum(skb, offset, len, 0), 1331 offset); 1332 } 1333 1334 static void 1335 tail_padding_csum(struct sk_buff *skb, int offset, 1336 struct mlx5e_rq_stats *stats) 1337 { 1338 u8 tail_padding[MAX_PADDING]; 1339 int len = skb->len - offset; 1340 void *tail; 1341 1342 if (unlikely(len > MAX_PADDING)) { 1343 tail_padding_csum_slow(skb, offset, len, stats); 1344 return; 1345 } 1346 1347 tail = skb_header_pointer(skb, offset, len, tail_padding); 1348 if (unlikely(!tail)) { 1349 tail_padding_csum_slow(skb, offset, len, stats); 1350 return; 1351 } 1352 1353 stats->csum_complete_tail++; 1354 skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); 1355 } 1356 1357 static void 1358 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto, 1359 struct mlx5e_rq_stats *stats) 1360 { 1361 struct ipv6hdr *ip6; 1362 struct iphdr *ip4; 1363 int pkt_len; 1364 1365 /* Fixup vlan headers, if any */ 1366 if (network_depth > ETH_HLEN) 1367 /* CQE csum is calculated from the IP header and does 1368 * not cover VLAN headers (if present). This will add 1369 * the checksum manually. 
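		 * by summing the VLAN header bytes into skb->csum.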
1370 */ 1371 skb->csum = csum_partial(skb->data + ETH_HLEN, 1372 network_depth - ETH_HLEN, 1373 skb->csum); 1374 1375 /* Fixup tail padding, if any */ 1376 switch (proto) { 1377 case htons(ETH_P_IP): 1378 ip4 = (struct iphdr *)(skb->data + network_depth); 1379 pkt_len = network_depth + ntohs(ip4->tot_len); 1380 break; 1381 case htons(ETH_P_IPV6): 1382 ip6 = (struct ipv6hdr *)(skb->data + network_depth); 1383 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); 1384 break; 1385 default: 1386 return; 1387 } 1388 1389 if (likely(pkt_len >= skb->len)) 1390 return; 1391 1392 tail_padding_csum(skb, pkt_len, stats); 1393 } 1394 1395 static inline void mlx5e_handle_csum(struct net_device *netdev, 1396 struct mlx5_cqe64 *cqe, 1397 struct mlx5e_rq *rq, 1398 struct sk_buff *skb, 1399 bool lro) 1400 { 1401 struct mlx5e_rq_stats *stats = rq->stats; 1402 int network_depth = 0; 1403 __be16 proto; 1404 1405 if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) 1406 goto csum_none; 1407 1408 if (lro) { 1409 skb->ip_summed = CHECKSUM_UNNECESSARY; 1410 stats->csum_unnecessary++; 1411 return; 1412 } 1413 1414 /* True when explicitly set via priv flag, or XDP prog is loaded */ 1415 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || 1416 get_cqe_tls_offload(cqe)) 1417 goto csum_unnecessary; 1418 1419 /* CQE csum doesn't cover padding octets in short ethernet 1420 * frames. And the pad field is appended prior to calculating 1421 * and appending the FCS field. 1422 * 1423 * Detecting these padded frames requires to verify and parse 1424 * IP headers, so we simply force all those small frames to be 1425 * CHECKSUM_UNNECESSARY even if they are not padded. 1426 */ 1427 if (short_frame(skb->len)) 1428 goto csum_unnecessary; 1429 1430 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { 1431 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) 1432 goto csum_unnecessary; 1433 1434 stats->csum_complete++; 1435 skb->ip_summed = CHECKSUM_COMPLETE; 1436 skb->csum = csum_unfold((__force __sum16)cqe->check_sum); 1437 1438 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state)) 1439 return; /* CQE csum covers all received bytes */ 1440 1441 /* csum might need some fixups ...*/ 1442 mlx5e_skb_csum_fixup(skb, network_depth, proto, stats); 1443 return; 1444 } 1445 1446 csum_unnecessary: 1447 if (likely((cqe->hds_ip_ext & CQE_L3_OK) && 1448 (cqe->hds_ip_ext & CQE_L4_OK))) { 1449 skb->ip_summed = CHECKSUM_UNNECESSARY; 1450 if (cqe_is_tunneled(cqe)) { 1451 skb->csum_level = 1; 1452 skb->encapsulation = 1; 1453 stats->csum_unnecessary_inner++; 1454 return; 1455 } 1456 stats->csum_unnecessary++; 1457 return; 1458 } 1459 csum_none: 1460 skb->ip_summed = CHECKSUM_NONE; 1461 stats->csum_none++; 1462 } 1463 1464 #define MLX5E_CE_BIT_MASK 0x80 1465 1466 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, 1467 u32 cqe_bcnt, 1468 struct mlx5e_rq *rq, 1469 struct sk_buff *skb) 1470 { 1471 u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; 1472 struct mlx5e_rq_stats *stats = rq->stats; 1473 struct net_device *netdev = rq->netdev; 1474 1475 skb->mac_len = ETH_HLEN; 1476 1477 if (unlikely(get_cqe_tls_offload(cqe))) 1478 mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); 1479 1480 if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) 1481 mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); 1482 1483 if (unlikely(mlx5e_macsec_is_rx_flow(cqe))) 1484 mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe); 1485 1486 if (lro_num_seg > 1) { 1487 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); 1488 
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); 1489 /* Subtract one since we already counted this as one 1490 * "regular" packet in mlx5e_complete_rx_cqe() 1491 */ 1492 stats->packets += lro_num_seg - 1; 1493 stats->lro_packets++; 1494 stats->lro_bytes += cqe_bcnt; 1495 } 1496 1497 if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) 1498 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, 1499 rq->clock, get_cqe_ts(cqe)); 1500 skb_record_rx_queue(skb, rq->ix); 1501 1502 if (likely(netdev->features & NETIF_F_RXHASH)) 1503 mlx5e_skb_set_hash(cqe, skb); 1504 1505 if (cqe_has_vlan(cqe)) { 1506 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1507 be16_to_cpu(cqe->vlan_info)); 1508 stats->removed_vlan_packets++; 1509 } 1510 1511 skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; 1512 1513 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); 1514 /* checking CE bit in cqe - MSB in ml_path field */ 1515 if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK)) 1516 mlx5e_enable_ecn(rq, skb); 1517 1518 skb->protocol = eth_type_trans(skb, netdev); 1519 1520 if (unlikely(mlx5e_skb_is_multicast(skb))) 1521 stats->mcast_packets++; 1522 } 1523 1524 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, 1525 struct mlx5_cqe64 *cqe, 1526 u32 cqe_bcnt, 1527 struct sk_buff *skb) 1528 { 1529 struct mlx5e_rq_stats *stats = rq->stats; 1530 1531 stats->packets++; 1532 stats->gro_packets++; 1533 stats->bytes += cqe_bcnt; 1534 stats->gro_bytes += cqe_bcnt; 1535 if (NAPI_GRO_CB(skb)->count != 1) 1536 return; 1537 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); 1538 skb_reset_network_header(skb); 1539 if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) { 1540 napi_gro_receive(rq->cq.napi, skb); 1541 rq->hw_gro_data->skb = NULL; 1542 } 1543 } 1544 1545 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, 1546 struct mlx5_cqe64 *cqe, 1547 u32 cqe_bcnt, 1548 struct sk_buff *skb) 1549 { 1550 struct mlx5e_rq_stats *stats = rq->stats; 1551 1552 stats->packets++; 1553 stats->bytes += cqe_bcnt; 1554 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); 1555 } 1556 1557 static inline 1558 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, 1559 u32 frag_size, u16 headroom, 1560 u32 cqe_bcnt, u32 metasize) 1561 { 1562 struct sk_buff *skb = build_skb(va, frag_size); 1563 1564 if (unlikely(!skb)) { 1565 rq->stats->buff_alloc_err++; 1566 return NULL; 1567 } 1568 1569 skb_reserve(skb, headroom); 1570 skb_put(skb, cqe_bcnt); 1571 1572 if (metasize) 1573 skb_metadata_set(skb, metasize); 1574 1575 return skb; 1576 } 1577 1578 static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, 1579 u32 len, struct xdp_buff *xdp) 1580 { 1581 xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq); 1582 xdp_prepare_buff(xdp, va, headroom, len, true); 1583 } 1584 1585 static struct sk_buff * 1586 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, 1587 u32 cqe_bcnt) 1588 { 1589 union mlx5e_alloc_unit *au = wi->au; 1590 u16 rx_headroom = rq->buff.headroom; 1591 struct bpf_prog *prog; 1592 struct sk_buff *skb; 1593 u32 metasize = 0; 1594 void *va, *data; 1595 dma_addr_t addr; 1596 u32 frag_size; 1597 1598 va = page_address(au->page) + wi->offset; 1599 data = va + rx_headroom; 1600 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); 1601 1602 addr = page_pool_get_dma_addr(au->page); 1603 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, 1604 frag_size, rq->buff.map_dir); 1605 net_prefetch(data); 1606 1607 prog = 
rcu_dereference(rq->xdp_prog); 1608 if (prog) { 1609 struct xdp_buff xdp; 1610 1611 net_prefetchw(va); /* xdp_frame data area */ 1612 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp); 1613 if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) 1614 return NULL; /* page/packet was consumed by XDP */ 1615 1616 rx_headroom = xdp.data - xdp.data_hard_start; 1617 metasize = xdp.data - xdp.data_meta; 1618 cqe_bcnt = xdp.data_end - xdp.data; 1619 } 1620 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); 1621 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); 1622 if (unlikely(!skb)) 1623 return NULL; 1624 1625 /* queue up for recycling/reuse */ 1626 page_ref_inc(au->page); 1627 1628 return skb; 1629 } 1630 1631 static struct sk_buff * 1632 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, 1633 u32 cqe_bcnt) 1634 { 1635 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; 1636 struct mlx5e_wqe_frag_info *head_wi = wi; 1637 union mlx5e_alloc_unit *au = wi->au; 1638 u16 rx_headroom = rq->buff.headroom; 1639 struct skb_shared_info *sinfo; 1640 u32 frag_consumed_bytes; 1641 struct bpf_prog *prog; 1642 struct xdp_buff xdp; 1643 struct sk_buff *skb; 1644 dma_addr_t addr; 1645 u32 truesize; 1646 void *va; 1647 1648 va = page_address(au->page) + wi->offset; 1649 frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt); 1650 1651 addr = page_pool_get_dma_addr(au->page); 1652 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, 1653 rq->buff.frame0_sz, rq->buff.map_dir); 1654 net_prefetchw(va); /* xdp_frame data area */ 1655 net_prefetch(va + rx_headroom); 1656 1657 mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp); 1658 sinfo = xdp_get_shared_info_from_buff(&xdp); 1659 truesize = 0; 1660 1661 cqe_bcnt -= frag_consumed_bytes; 1662 frag_info++; 1663 wi++; 1664 1665 while (cqe_bcnt) { 1666 skb_frag_t *frag; 1667 1668 au = wi->au; 1669 1670 frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt); 1671 1672 addr = page_pool_get_dma_addr(au->page); 1673 dma_sync_single_for_cpu(rq->pdev, addr + wi->offset, 1674 frag_consumed_bytes, rq->buff.map_dir); 1675 1676 if (!xdp_buff_has_frags(&xdp)) { 1677 /* Init on the first fragment to avoid cold cache access 1678 * when possible. 
1679 */ 1680 sinfo->nr_frags = 0; 1681 sinfo->xdp_frags_size = 0; 1682 xdp_buff_set_frags_flag(&xdp); 1683 } 1684 1685 frag = &sinfo->frags[sinfo->nr_frags++]; 1686 __skb_frag_set_page(frag, au->page); 1687 skb_frag_off_set(frag, wi->offset); 1688 skb_frag_size_set(frag, frag_consumed_bytes); 1689 1690 if (page_is_pfmemalloc(au->page)) 1691 xdp_buff_set_frag_pfmemalloc(&xdp); 1692 1693 sinfo->xdp_frags_size += frag_consumed_bytes; 1694 truesize += frag_info->frag_stride; 1695 1696 cqe_bcnt -= frag_consumed_bytes; 1697 frag_info++; 1698 wi++; 1699 } 1700 1701 au = head_wi->au; 1702 1703 prog = rcu_dereference(rq->xdp_prog); 1704 if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) { 1705 if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 1706 int i; 1707 1708 for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++) 1709 mlx5e_put_rx_frag(rq, &head_wi[i], true); 1710 } 1711 return NULL; /* page/packet was consumed by XDP */ 1712 } 1713 1714 skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz, 1715 xdp.data - xdp.data_hard_start, 1716 xdp.data_end - xdp.data, 1717 xdp.data - xdp.data_meta); 1718 if (unlikely(!skb)) 1719 return NULL; 1720 1721 page_ref_inc(au->page); 1722 1723 if (unlikely(xdp_buff_has_frags(&xdp))) { 1724 int i; 1725 1726 /* sinfo->nr_frags is reset by build_skb, calculate again. */ 1727 xdp_update_skb_shared_info(skb, wi - head_wi - 1, 1728 sinfo->xdp_frags_size, truesize, 1729 xdp_buff_is_frag_pfmemalloc(&xdp)); 1730 1731 for (i = 0; i < sinfo->nr_frags; i++) { 1732 skb_frag_t *frag = &sinfo->frags[i]; 1733 1734 page_ref_inc(skb_frag_page(frag)); 1735 } 1736 } 1737 1738 return skb; 1739 } 1740 1741 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1742 { 1743 struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; 1744 struct mlx5e_priv *priv = rq->priv; 1745 1746 if (cqe_syndrome_needs_recover(err_cqe->syndrome) && 1747 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { 1748 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); 1749 queue_work(priv->wq, &rq->recover_work); 1750 } 1751 } 1752 1753 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1754 { 1755 trigger_report(rq, cqe); 1756 rq->stats->wqe_err++; 1757 } 1758 1759 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1760 { 1761 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 1762 struct mlx5e_wqe_frag_info *wi; 1763 struct sk_buff *skb; 1764 u32 cqe_bcnt; 1765 u16 ci; 1766 1767 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); 1768 wi = get_frag(rq, ci); 1769 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); 1770 1771 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 1772 mlx5e_handle_rx_err_cqe(rq, cqe); 1773 goto free_wqe; 1774 } 1775 1776 skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe, 1777 mlx5e_skb_from_cqe_linear, 1778 mlx5e_skb_from_cqe_nonlinear, 1779 mlx5e_xsk_skb_from_cqe_linear, 1780 rq, wi, cqe_bcnt); 1781 if (!skb) { 1782 /* probably for XDP */ 1783 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 1784 /* do not return page to cache, 1785 * it will be returned on XDP_TX completion. 
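			 * (the XDP-SQ completion handler releases it at that point).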
1786 */ 1787 goto wq_cyc_pop; 1788 } 1789 goto free_wqe; 1790 } 1791 1792 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1793 1794 if (mlx5e_cqe_regb_chain(cqe)) 1795 if (!mlx5e_tc_update_skb(cqe, skb)) { 1796 dev_kfree_skb_any(skb); 1797 goto free_wqe; 1798 } 1799 1800 napi_gro_receive(rq->cq.napi, skb); 1801 1802 free_wqe: 1803 mlx5e_free_rx_wqe(rq, wi, true); 1804 wq_cyc_pop: 1805 mlx5_wq_cyc_pop(wq); 1806 } 1807 1808 #ifdef CONFIG_MLX5_ESWITCH 1809 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1810 { 1811 struct net_device *netdev = rq->netdev; 1812 struct mlx5e_priv *priv = netdev_priv(netdev); 1813 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1814 struct mlx5_eswitch_rep *rep = rpriv->rep; 1815 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 1816 struct mlx5e_wqe_frag_info *wi; 1817 struct sk_buff *skb; 1818 u32 cqe_bcnt; 1819 u16 ci; 1820 1821 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); 1822 wi = get_frag(rq, ci); 1823 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); 1824 1825 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 1826 mlx5e_handle_rx_err_cqe(rq, cqe); 1827 goto free_wqe; 1828 } 1829 1830 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, 1831 mlx5e_skb_from_cqe_linear, 1832 mlx5e_skb_from_cqe_nonlinear, 1833 rq, wi, cqe_bcnt); 1834 if (!skb) { 1835 /* probably for XDP */ 1836 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 1837 /* do not return page to cache, 1838 * it will be returned on XDP_TX completion. 1839 */ 1840 goto wq_cyc_pop; 1841 } 1842 goto free_wqe; 1843 } 1844 1845 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1846 1847 if (rep->vlan && skb_vlan_tag_present(skb)) 1848 skb_vlan_pop(skb); 1849 1850 mlx5e_rep_tc_receive(cqe, rq, skb); 1851 1852 free_wqe: 1853 mlx5e_free_rx_wqe(rq, wi, true); 1854 wq_cyc_pop: 1855 mlx5_wq_cyc_pop(wq); 1856 } 1857 1858 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1859 { 1860 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); 1861 u16 wqe_id = be16_to_cpu(cqe->wqe_id); 1862 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id); 1863 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); 1864 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; 1865 u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1); 1866 u32 page_idx = wqe_offset >> rq->mpwqe.page_shift; 1867 struct mlx5e_rx_wqe_ll *wqe; 1868 struct mlx5_wq_ll *wq; 1869 struct sk_buff *skb; 1870 u16 cqe_bcnt; 1871 1872 wi->consumed_strides += cstrides; 1873 1874 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 1875 mlx5e_handle_rx_err_cqe(rq, cqe); 1876 goto mpwrq_cqe_out; 1877 } 1878 1879 if (unlikely(mpwrq_is_filler_cqe(cqe))) { 1880 struct mlx5e_rq_stats *stats = rq->stats; 1881 1882 stats->mpwqe_filler_cqes++; 1883 stats->mpwqe_filler_strides += cstrides; 1884 goto mpwrq_cqe_out; 1885 } 1886 1887 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); 1888 1889 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, 1890 mlx5e_skb_from_cqe_mpwrq_linear, 1891 mlx5e_skb_from_cqe_mpwrq_nonlinear, 1892 rq, wi, cqe_bcnt, head_offset, page_idx); 1893 if (!skb) 1894 goto mpwrq_cqe_out; 1895 1896 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1897 1898 mlx5e_rep_tc_receive(cqe, rq, skb); 1899 1900 mpwrq_cqe_out: 1901 if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) 1902 return; 1903 1904 wq = &rq->mpwqe.wq; 1905 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); 1906 mlx5e_free_rx_mpwqe(rq, wi, true); 1907 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); 1908 } 1909 1910 const struct mlx5e_rx_handlers 
static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
	u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      rq, wi, cqe_bcnt, head_offset, page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	mlx5e_rep_tc_receive(cqe, rq, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
	.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
};
#endif

static void
mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
		    union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
{
	net_prefetchw(skb->data);

	while (data_bcnt) {
		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
		unsigned int truesize;

		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
			truesize = pg_consumed_bytes;
		else
			truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));

		mlx5e_add_skb_frag(rq, skb, au, data_offset,
				   pg_consumed_bytes, truesize);

		data_bcnt -= pg_consumed_bytes;
		data_offset = 0;
		au++;
	}
}

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
	union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
	u32 frag_offset = head_offset + headlen;
	u32 byte_cnt = cqe_bcnt - headlen;
	union mlx5e_alloc_unit *head_au = au;
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = napi_alloc_skb(rq->cq.napi,
			     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
	if (unlikely(!skb)) {
		rq->stats->buff_alloc_err++;
		return NULL;
	}

	net_prefetchw(skb->data);

	/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
	if (unlikely(frag_offset >= PAGE_SIZE)) {
		au++;
		frag_offset -= PAGE_SIZE;
	}

	mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
	/* copy header */
	addr = page_pool_get_dma_addr(head_au->page);
	mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
			      head_offset, head_offset, headlen);
	/* skb linear part was allocated with headlen and aligned to long */
	skb->tail += headlen;
	skb->len += headlen;

	return skb;
}

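/* Linear MPWQE path: the whole packet fits within a single stride, so the SKB
 * is built directly around the receive page (headroom followed by packet
 * data). If an XDP program is attached it runs on the frame first and may
 * consume it; the page is then flagged in wi->xdp_xmit_bitmap for XDP_TX
 * completion instead of being recycled here.
 */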
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
	union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
	u16 rx_headroom = rq->buff.headroom;
	struct bpf_prog *prog;
	struct sk_buff *skb;
	u32 metasize = 0;
	void *va, *data;
	dma_addr_t addr;
	u32 frag_size;

	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
		rq->stats->oversize_pkts_sw_drop++;
		return NULL;
	}

	va = page_address(au->page) + head_offset;
	data = va + rx_headroom;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);

	addr = page_pool_get_dma_addr(au->page);
	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
				      frag_size, rq->buff.map_dir);
	net_prefetch(data);

	prog = rcu_dereference(rq->xdp_prog);
	if (prog) {
		struct xdp_buff xdp;

		net_prefetchw(va); /* xdp_frame data area */
		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
		if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
				__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
			return NULL; /* page/packet was consumed by XDP */
		}

		rx_headroom = xdp.data - xdp.data_hard_start;
		metasize = xdp.data - xdp.data_meta;
		cqe_bcnt = xdp.data_end - xdp.data;
	}
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
	if (unlikely(!skb))
		return NULL;

	/* queue up for recycling/reuse */
	page_ref_inc(au->page);

	return skb;
}

static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			  struct mlx5_cqe64 *cqe, u16 header_index)
{
	struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
	u16 head_offset = head->addr & (PAGE_SIZE - 1);
	u16 head_size = cqe->shampo.header_size;
	u16 rx_headroom = rq->buff.headroom;
	struct sk_buff *skb = NULL;
	void *hdr, *data;
	u32 frag_size;

	hdr = page_address(head->page) + head_offset;
	data = hdr + rx_headroom;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);

	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
		/* build SKB around header */
		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
		prefetchw(hdr);
		prefetch(data);
		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);

		if (unlikely(!skb))
			return NULL;

		/* queue up for recycling/reuse */
		page_ref_inc(head->page);

	} else {
		/* allocate SKB and copy header for large header */
		rq->stats->gro_large_hds++;
		skb = napi_alloc_skb(rq->cq.napi,
				     ALIGN(head_size, sizeof(long)));
		if (unlikely(!skb)) {
			rq->stats->buff_alloc_err++;
			return NULL;
		}

		prefetchw(skb->data);
		mlx5e_copy_skb_header(rq, skb, head->page, head->addr,
				      head_offset + rx_headroom,
				      rx_headroom, head_size);
		/* skb linear part was allocated with head_size and aligned to long */
		skb->tail += head_size;
		skb->len += head_size;
	}
	return skb;
}

static void
mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
{
	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
	unsigned int frag_size = skb_frag_size(last_frag);
	unsigned int frag_truesize;

	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
	skb->truesize += frag_truesize - frag_size;
}

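/* Flush the SKB that SHAMPO (HW GRO) has been aggregating for the current
 * flow: align the truesize of the last fragment to the stride size, let
 * mlx5e_shampo_update_hdr() fix up the merged headers when more than one CQE
 * was aggregated, and hand the SKB to napi_gro_receive().
 */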
static void
mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct mlx5e_rq_stats *stats = rq->stats;

	stats->gro_skbs++;
	if (likely(skb_shinfo(skb)->nr_frags))
		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
	if (NAPI_GRO_CB(skb)->count > 1)
		mlx5e_shampo_update_hdr(rq, cqe, match);
	napi_gro_receive(rq->cq.napi, skb);
	rq->hw_gro_data->skb = NULL;
}

static bool
mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;

	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}

static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	u64 addr = shampo->info[header_index].addr;

	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
		shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
		mlx5e_page_release_dynamic(rq, shampo->info[header_index].page, true);
	}
	bitmap_clear(shampo->bitmap, header_index, 1);
}

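/* SHAMPO (HW GRO) completion handler. The CQE points at the packet header
 * (header_index into the SHAMPO header buffer) separately from the payload
 * (data_offset into the MPWQE). Consecutive packets of the same flow
 * ("match") are appended as frags to the SKB cached in rq->hw_gro_data; the
 * aggregate is flushed to the stack when the flow changes, the SKB runs out
 * of space, or the CQE carries the flush bit.
 */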
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
	u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	u16 head_size = cqe->shampo.header_size;
	struct sk_buff **skb = &rq->hw_gro_data->skb;
	bool flush = cqe->shampo.flush;
	bool match = cqe->shampo.match;
	struct mlx5e_rq_stats *stats = rq->stats;
	struct mlx5e_rx_wqe_ll *wqe;
	union mlx5e_alloc_unit *au;
	struct mlx5e_mpw_info *wi;
	struct mlx5_wq_ll *wq;

	wi = mlx5e_get_mpw_info(rq, wqe_id);
	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	stats->gro_match_packets += match;

	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
		match = false;
		mlx5e_shampo_flush_skb(rq, cqe, match);
	}

	if (!*skb) {
		if (likely(head_size))
			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
		else
			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
								  page_idx);
		if (unlikely(!*skb))
			goto free_hd_entry;

		NAPI_GRO_CB(*skb)->count = 1;
		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
	} else {
		NAPI_GRO_CB(*skb)->count++;
		if (NAPI_GRO_CB(*skb)->count == 2 &&
		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
				    sizeof(struct iphdr);
			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);

			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
		}
	}

	if (likely(head_size)) {
		au = &wi->alloc_units[page_idx];
		mlx5e_fill_skb_data(*skb, rq, au, data_bcnt, data_offset);
	}

	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
	if (flush)
		mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
	mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
	u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
			      rq, wi, cqe_bcnt, head_offset, page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (mlx5e_cqe_regb_chain(cqe))
		if (!mlx5e_tc_update_skb(cqe, skb)) {
			dev_kfree_skb_any(skb);
			goto mpwrq_cqe_out;
		}

	napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5e_free_rx_mpwqe(rq, wi, true);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

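/* Enhanced CQE compression: compressed blocks arrive as a title CQE followed
 * by mini CQE arrays. A block's mini CQEs may only show up on a later poll
 * cycle, so the title is cached (mlx5e_read_enhanced_title_slot() plus
 * cqd->last_cqe_title) until its compressed CQEs are decompressed.
 */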
static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
						 struct mlx5_cqwq *cqwq,
						 int budget_rem)
{
	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	int work_done = 0;

	cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
	if (!cqe)
		return work_done;

	if (cqd->last_cqe_title &&
	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
		rq->stats->cqe_compress_blks++;
		cqd->last_cqe_title = false;
	}

	do {
		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			if (title_cqe) {
				mlx5e_read_enhanced_title_slot(rq, title_cqe);
				title_cqe = NULL;
				rq->stats->cqe_compress_blks++;
			}
			work_done +=
				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
							      budget_rem - work_done);
			continue;
		}
		title_cqe = cqe;
		mlx5_cqwq_pop(cqwq);

		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
				rq, cqe);
		work_done++;
	} while (work_done < budget_rem &&
		 (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));

	/* The last CQE might be the title of the next poll bulk. */
	if (title_cqe) {
		mlx5e_read_enhanced_title_slot(rq, title_cqe);
		cqd->last_cqe_title = true;
	}

	return work_done;
}

static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
					      struct mlx5_cqwq *cqwq,
					      int budget_rem)
{
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (rq->cqd.left)
		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);

	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			work_done +=
				mlx5e_decompress_cqes_start(rq, cqwq,
							    budget_rem - work_done);
			continue;
		}

		mlx5_cqwq_pop(cqwq);
		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
				rq, cqe);
		work_done++;
	}

	return work_done;
}

int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	int work_done;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return 0;

	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
								  budget);
	else
		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
							       budget);

	if (work_done == 0)
		return 0;

	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
		mlx5e_shampo_flush_skb(rq, NULL, false);

	if (rcu_access_pointer(rq->xdp_prog))
		mlx5e_xdp_rx_poll_complete(rq);

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}

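/* IPoIB RX path: completions carry an InfiniBand GRH ahead of the payload,
 * which is used to classify the packet type and to drop packets that the HCA
 * looped back to the sender, and is then stripped (skb_pull of
 * MLX5_IB_GRH_BYTES) before a zeroed pseudo header is pushed for the IPoIB
 * netdev.
 */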
#ifdef CONFIG_MLX5_CORE_IPOIB

#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE 16

static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct hwtstamp_config *tstamp;
	struct mlx5e_rq_stats *stats;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	char *pseudo_header;
	u32 flags_rqpn;
	u32 qpn;
	u8 *dgid;
	u8 g;

	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);

	/* No mapping present, cannot process SKB. This might happen if a child
	 * interface is going down while having unprocessed CQEs on the parent RQ.
	 */
	if (unlikely(!netdev)) {
		/* TODO: add drop counters support */
		skb->dev = NULL;
		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
		return;
	}

	priv = mlx5i_epriv(netdev);
	tstamp = &priv->tstamp;
	stats = rq->stats;

	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
	g = (flags_rqpn >> 28) & 3;
	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
	if ((!g) || dgid[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	/* Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
		    MLX5_GID_SIZE) == 0)) {
		skb->dev = NULL;
		return;
	}

	skb_pull(skb, MLX5_IB_GRH_BYTES);

	skb->protocol = *((__be16 *)(skb->data));

	if (netdev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		stats->csum_complete++;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		stats->csum_none++;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
								  rq->clock, get_cqe_ts(cqe));
	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	/* Push a zeroed 20-byte pseudo header; the 4-byte encap header already
	 * exists in the data.
	 */
	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
	skb_reset_mac_header(skb);
	skb_pull(skb, MLX5_IPOIB_HARD_LEN);

	skb->dev = netdev;

	stats->packets++;
	stats->bytes += cqe_bcnt;
}

static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_free_wqe;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, wi, cqe_bcnt);
	if (!skb)
		goto wq_free_wqe;

	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	if (unlikely(!skb->dev)) {
		dev_kfree_skb_any(skb);
		goto wq_free_wqe;
	}
	napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
	mlx5e_free_rx_wqe(rq, wi, true);
	mlx5_wq_cyc_pop(wq);
}

const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
	.handle_rx_cqe = mlx5i_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
};
#endif /* CONFIG_MLX5_CORE_IPOIB */

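/* Choose the RQ handlers at setup time: the skb-from-cqe builder depends on
 * the WQ type (striding MPWQE vs. cyclic), on whether the RQ is XSK-bound and
 * on whether a linear SKB is possible for the configured parameters; the CQE
 * handler comes from the profile's rx_handlers and must be provided for the
 * selected mode (SHAMPO, MPWQE or basic), otherwise -EINVAL is returned.
 */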
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5_core_dev *mdev = rq->mdev;
	struct mlx5e_priv *priv = rq->priv;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
				return -EINVAL;
			}
		} else {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
				return -EINVAL;
			}
		}

		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			netdev_err(netdev, "RX handler of RQ is not set\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5e_priv *priv = netdev_priv(rq->netdev);
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct devlink_port *dl_port;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 trap_id;
	u16 ci;

	trap_id = get_cqe_flow_tag(cqe);
	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto free_wqe;
	}

	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
	if (!skb)
		goto free_wqe;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	skb_push(skb, ETH_HLEN);

	dl_port = mlx5e_devlink_get_dl_port(priv);
	mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
	dev_kfree_skb_any(skb);

free_wqe:
	mlx5e_free_rx_wqe(rq, wi, false);
	mlx5_wq_cyc_pop(wq);
}

void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
			       mlx5e_skb_from_cqe_linear :
			       mlx5e_skb_from_cqe_nonlinear;
	rq->post_wqes = mlx5e_post_rx_wqes;
	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
}