/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

/* Refill one Rx descriptor: hand out the current fragments from the per-ring
 * page allocator and advance (or replace) each allocator page for later reuse.
 */
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		if (ring_alloc[i].offset == frag_info->last_offset) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   MLX4_EN_ALLOC_ORDER);
			if (!page)
				goto out;
			dma = dma_map_page(priv->ddev, page, 0,
					   MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
			if (dma_mapping_error(priv->ddev, dma)) {
				put_page(page);
				goto out;
			}
			page_alloc[i].page = page;
			page_alloc[i].dma = dma;
			page_alloc[i].offset = frag_info->frag_align;
		} else {
			page_alloc[i].page = ring_alloc[i].page;
			get_page(ring_alloc[i].page);
			page_alloc[i].dma = ring_alloc[i].dma;
			page_alloc[i].offset = ring_alloc[i].offset +
					       frag_info->frag_stride;
		}
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;


out:
	while (i--) {
		frag_info = &priv->frag_info[i];
		if (ring_alloc[i].offset == frag_info->last_offset)
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
		put_page(page_alloc[i].page);
	}
	return -ENOMEM;
}

static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

	if (frags[i].offset == frag_info->last_offset) {
		dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE,
			       PCI_DMA_FROMDEVICE);
	}
	if (frags[i].page)
		put_page(frags[i].page);
}

static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					       MLX4_EN_ALLOC_ORDER);
		if (!page_alloc->page)
			goto out;

		page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
					       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
			put_page(page_alloc->page);
			page_alloc->page = NULL;
			goto out;
		}
		page_alloc->offset = priv->frag_info[i].frag_align;
		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
		       i, page_alloc->page);
	}
	return 0;

out:
	while (i--) {
		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
		put_page(page_alloc->page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
		put_page(page_alloc->page);
		page_alloc->page = NULL;
	}
}

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					 (index << priv->log_rx_info);

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate "
						     "enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated "
						      "reducing ring size to %d",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = -ENOMEM;
	int tmp;

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc(tmp);
	if (!ring->rx_info)
		return -ENOMEM;

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_ring;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv, ring);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}


/* Attach the fragments holding a received packet to an skb as paged frags,
 * returning the number of fragments used.
 */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (!frags[nr].page)
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		get_page(frags[nr].page);
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].offset;
		skb->truesize += frag_info->frag_stride;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}


/* Build an skb for a completed Rx descriptor: small packets are copied
 * entirely into the linear part, larger ones keep their payload in page
 * fragments with only the headers copied.
 */
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

/* Process up to 'budget' completions from the Rx CQ and pass the resulting
 * packets up the stack.
 */
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[(index << factor) + factor];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
				     "syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we haven't
			 * skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_node *n;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				ring->csum_ok++;
				/* This packet is eligible for GRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (dev->features & NETIF_F_GRO) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(priv,
						rx_desc, frags, gro_skb,
						length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (cqe->vlan_my_qpn &
					    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
						u16 vid = be16_to_cpu(cqe->sl_vid);

						__vlan_hwaccel_put_tag(gro_skb, vid);
					}

					if (dev->features & NETIF_F_RXHASH)
						gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);

					skb_record_rx_queue(gro_skb, cq->ring);
					napi_gro_frags(&cq->napi);

					goto next;
				}

				/* GRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				ring->csum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (dev->features & NETIF_F_RXHASH)
			skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);

		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK)
			__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));

		/* Push it up the stack */
		netif_receive_skb(skb);

next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[(index << factor) + factor];
		if (++polled == budget)
			goto out;
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		napi_schedule(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget)
		INC_PERF_COUNTER(priv->pstats.napi_quota);
	else {
		/* Done for now */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}


/* Calculate the last offset position that accommodates a full fragment
 * (assuming fragment size = stride-align) */
static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
{
	u16 res = MLX4_EN_ALLOC_SIZE % stride;
	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;

	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
			  "res:%d offset:%d\n", stride, align, res, offset);
	return offset;
}


static int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};

/* Work out the Rx fragment layout needed to hold a packet of the effective
 * MTU (MTU + Ethernet/VLAN/LLC-SNAP headers).
 */
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i) {
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
						priv, priv->frag_info[i].frag_stride,
						priv->frag_info[i].frag_align);
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
			  "num_frags:%d):\n", eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
				"stride:%d last_offset:%d\n", i,
				priv->frag_info[i].frag_size,
				priv->frag_info[i].frag_prefix_size,
				priv->frag_info[i].frag_align,
				priv->frag_info[i].frag_stride,
				priv->frag_info[i].last_offset);
	}
}

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
		       MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
		0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
		0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}
	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}