/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#include "mlx4_en.h"

static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = frag_info->frag_align;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we can not use atomic_set().
	 */
	atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
		   &page->_count);
	return 0;
}

static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		frag_info = &priv->frag_info[i];
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
			atomic_set(&page->_count, 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}

static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       PCI_DMA_FROMDEVICE);

	if (frags[i].page)
		put_page(frags[i].page);
}

static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL))
			goto out;
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
		atomic_set(&page->_count, 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		if (!dev->caps.comp_pool)
			num_of_eqs = max_t(int, MIN_RX_RINGS,
					   min_t(int,
						 dev->caps.num_comp_vectors,
						 DEF_RX_RINGS));
		else
			num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
					   dev->caps.comp_pool /
					   dev->caps.num_ports) - 1;

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}

static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (!frags[nr].page)
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}

static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (!priv->port_up)
		return 0;

	if (budget <= 0)
		return polled;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we don't have
			 * an skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				ring->csum_ok++;
				/* This packet is eligible for GRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment
				 * - no LLS polling in progress
				 */
				if (!mlx4_en_cq_busy_polling(cq) &&
				    (dev->features & NETIF_F_GRO)) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(priv,
						rx_desc, frags, gro_skb,
						length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (l2_tunnel)
						gro_skb->csum_level = 1;
					if ((cqe->vlan_my_qpn &
					    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
					    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
						u16 vid = be16_to_cpu(cqe->sl_vid);

						__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
					}

					if (dev->features & NETIF_F_RXHASH)
						skb_set_hash(gro_skb,
							     be32_to_cpu(cqe->immed_rss_invalid),
							     PKT_HASH_TYPE_L3);

					skb_record_rx_queue(gro_skb, cq->ring);
					skb_mark_napi_id(gro_skb, &cq->napi);

					if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
						timestamp = mlx4_en_get_cqe_ts(cqe);
						mlx4_en_fill_hwtstamps(mdev,
								       skb_hwtstamps(gro_skb),
								       timestamp);
					}

					napi_gro_frags(&cq->napi);
					goto next;
				}

				/* GRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				ring->csum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		skb_mark_napi_id(skb, &cq->napi);

		if (!mlx4_en_cq_busy_polling(cq))
			napi_gro_receive(&cq->napi, skb);
		else
			netif_receive_skb(skb);

next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			goto out;
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		napi_schedule(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	if (!mlx4_en_cq_lock_napi(cq))
		return budget;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	mlx4_en_cq_unlock_napi(cq);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		int cpu_curr;
		const struct cpumask *aff;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;

		if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
			/* Current cpu is not according to smp_irq_affinity -
			 * probably affinity changed. need to stop this NAPI
			 * poll, and restart it on the right CPU
			 */
			napi_complete(napi);
			mlx4_en_arm_cq(priv, cq);
			return 0;
		}
	} else {
		/* Done for now */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}

static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i) {
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_align,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
				0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
				0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}