1 /* 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/string.h> 35 #include <linux/etherdevice.h> 36 37 #include <linux/mlx4/cmd.h> 38 #include <linux/export.h> 39 40 #include "mlx4.h" 41 42 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 43 { 44 return 1 << dev->oper_log_mgm_entry_size; 45 } 46 47 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 48 { 49 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 50 } 51 52 static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, 53 struct mlx4_cmd_mailbox *mailbox, 54 u32 size, 55 u64 *reg_id) 56 { 57 u64 imm; 58 int err = 0; 59 60 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, 61 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 62 MLX4_CMD_NATIVE); 63 if (err) 64 return err; 65 *reg_id = imm; 66 67 return err; 68 } 69 70 static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) 71 { 72 int err = 0; 73 74 err = mlx4_cmd(dev, regid, 0, 0, 75 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 76 MLX4_CMD_NATIVE); 77 78 return err; 79 } 80 81 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 82 struct mlx4_cmd_mailbox *mailbox) 83 { 84 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 85 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 86 } 87 88 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 89 struct mlx4_cmd_mailbox *mailbox) 90 { 91 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 92 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 93 } 94 95 static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, 96 struct mlx4_cmd_mailbox *mailbox) 97 { 98 u32 in_mod; 99 100 in_mod = (u32) port << 16 | steer << 1; 101 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 102 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, 103 MLX4_CMD_NATIVE); 104 } 105 106 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 107 u16 *hash, u8 op_mod) 108 { 109 u64 imm; 110 int err; 111 112 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 113 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, 114 MLX4_CMD_NATIVE); 115 116 if (!err) 117 *hash = imm; 118 119 return err; 120 } 121 122 static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, 123 enum mlx4_steer_type steer, 124 u32 qpn) 125 { 126 struct mlx4_steer *s_steer; 127 struct mlx4_promisc_qp *pqp; 128 129 if (port < 1 || port > dev->caps.num_ports) 130 return NULL; 131 132 s_steer = &mlx4_priv(dev)->steer[port - 1]; 133 134 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 135 if (pqp->qpn == qpn) 136 return pqp; 137 } 138 /* not found */ 139 return NULL; 140 } 141 142 /* 143 * Add new entry to steering data structure. 144 * All promisc QPs should be added as well 145 */ 146 static int new_steering_entry(struct mlx4_dev *dev, u8 port, 147 enum mlx4_steer_type steer, 148 unsigned int index, u32 qpn) 149 { 150 struct mlx4_steer *s_steer; 151 struct mlx4_cmd_mailbox *mailbox; 152 struct mlx4_mgm *mgm; 153 u32 members_count; 154 struct mlx4_steer_index *new_entry; 155 struct mlx4_promisc_qp *pqp; 156 struct mlx4_promisc_qp *dqp = NULL; 157 u32 prot; 158 int err; 159 160 if (port < 1 || port > dev->caps.num_ports) 161 return -EINVAL; 162 163 s_steer = &mlx4_priv(dev)->steer[port - 1]; 164 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 165 if (!new_entry) 166 return -ENOMEM; 167 168 INIT_LIST_HEAD(&new_entry->duplicates); 169 new_entry->index = index; 170 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 171 172 /* If the given qpn is also a promisc qp, 173 * it should be inserted to duplicates list 174 */ 175 pqp = get_promisc_qp(dev, port, steer, qpn); 176 if (pqp) { 177 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 178 if (!dqp) { 179 err = -ENOMEM; 180 goto out_alloc; 181 } 182 dqp->qpn = qpn; 183 list_add_tail(&dqp->list, &new_entry->duplicates); 184 } 185 186 /* if no promisc qps for this vep, we are done */ 187 if (list_empty(&s_steer->promisc_qps[steer])) 188 return 0; 189 190 /* now need to add all the promisc qps to the new 191 * steering entry, as they should also receive the packets 192 * destined to this address */ 193 mailbox = mlx4_alloc_cmd_mailbox(dev); 194 if (IS_ERR(mailbox)) { 195 err = -ENOMEM; 196 goto out_alloc; 197 } 198 mgm = mailbox->buf; 199 200 err = mlx4_READ_ENTRY(dev, index, mailbox); 201 if (err) 202 goto out_mailbox; 203 204 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 205 prot = be32_to_cpu(mgm->members_count) >> 30; 206 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 207 /* don't add already existing qpn */ 208 if (pqp->qpn == qpn) 209 continue; 210 if (members_count == dev->caps.num_qp_per_mgm) { 211 /* out of space */ 212 err = -ENOMEM; 213 goto out_mailbox; 214 } 215 216 /* add the qpn */ 217 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 218 } 219 /* update the qps count and update the entry with all the promisc qps*/ 220 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 221 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 222 223 out_mailbox: 224 mlx4_free_cmd_mailbox(dev, mailbox); 225 if (!err) 226 return 0; 227 out_alloc: 228 if (dqp) { 229 list_del(&dqp->list); 230 kfree(dqp); 231 } 232 list_del(&new_entry->list); 233 kfree(new_entry); 234 return err; 235 } 236 237 /* update the data structures with existing steering entry */ 238 static int existing_steering_entry(struct mlx4_dev *dev, u8 port, 239 enum mlx4_steer_type steer, 240 unsigned int index, u32 qpn) 241 { 242 struct mlx4_steer *s_steer; 243 struct mlx4_steer_index *tmp_entry, *entry = NULL; 244 struct mlx4_promisc_qp *pqp; 245 struct mlx4_promisc_qp *dqp; 246 247 if (port < 1 || port > dev->caps.num_ports) 248 return -EINVAL; 249 250 s_steer = &mlx4_priv(dev)->steer[port - 1]; 251 252 pqp = get_promisc_qp(dev, port, steer, qpn); 253 if (!pqp) 254 return 0; /* nothing to do */ 255 256 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 257 if (tmp_entry->index == index) { 258 entry = tmp_entry; 259 break; 260 } 261 } 262 if (unlikely(!entry)) { 263 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 264 return -EINVAL; 265 } 266 267 /* the given qpn is listed as a promisc qpn 268 * we need to add it as a duplicate to this entry 269 * for future references */ 270 list_for_each_entry(dqp, &entry->duplicates, list) { 271 if (qpn == dqp->qpn) 272 return 0; /* qp is already duplicated */ 273 } 274 275 /* add the qp as a duplicate on this index */ 276 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 277 if (!dqp) 278 return -ENOMEM; 279 dqp->qpn = qpn; 280 list_add_tail(&dqp->list, &entry->duplicates); 281 282 return 0; 283 } 284 285 /* Check whether a qpn is a duplicate on steering entry 286 * If so, it should not be removed from mgm */ 287 static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, 288 enum mlx4_steer_type steer, 289 unsigned int index, u32 qpn) 290 { 291 struct mlx4_steer *s_steer; 292 struct mlx4_steer_index *tmp_entry, *entry = NULL; 293 struct mlx4_promisc_qp *dqp, *tmp_dqp; 294 295 if (port < 1 || port > dev->caps.num_ports) 296 return NULL; 297 298 s_steer = &mlx4_priv(dev)->steer[port - 1]; 299 300 /* if qp is not promisc, it cannot be duplicated */ 301 if (!get_promisc_qp(dev, port, steer, qpn)) 302 return false; 303 304 /* The qp is promisc qp so it is a duplicate on this index 305 * Find the index entry, and remove the duplicate */ 306 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 307 if (tmp_entry->index == index) { 308 entry = tmp_entry; 309 break; 310 } 311 } 312 if (unlikely(!entry)) { 313 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 314 return false; 315 } 316 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 317 if (dqp->qpn == qpn) { 318 list_del(&dqp->list); 319 kfree(dqp); 320 } 321 } 322 return true; 323 } 324 325 /* Returns true if all the QPs != tqpn contained in this entry 326 * are Promisc QPs. Returns false otherwise. 327 */ 328 static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port, 329 enum mlx4_steer_type steer, 330 unsigned int index, u32 tqpn, 331 u32 *members_count) 332 { 333 struct mlx4_cmd_mailbox *mailbox; 334 struct mlx4_mgm *mgm; 335 u32 m_count; 336 bool ret = false; 337 int i; 338 339 if (port < 1 || port > dev->caps.num_ports) 340 return false; 341 342 mailbox = mlx4_alloc_cmd_mailbox(dev); 343 if (IS_ERR(mailbox)) 344 return false; 345 mgm = mailbox->buf; 346 347 if (mlx4_READ_ENTRY(dev, index, mailbox)) 348 goto out; 349 m_count = be32_to_cpu(mgm->members_count) & 0xffffff; 350 if (members_count) 351 *members_count = m_count; 352 353 for (i = 0; i < m_count; i++) { 354 u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 355 if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { 356 /* the qp is not promisc, the entry can't be removed */ 357 goto out; 358 } 359 } 360 ret = true; 361 out: 362 mlx4_free_cmd_mailbox(dev, mailbox); 363 return ret; 364 } 365 366 /* IF a steering entry contains only promisc QPs, it can be removed. */ 367 static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, 368 enum mlx4_steer_type steer, 369 unsigned int index, u32 tqpn) 370 { 371 struct mlx4_steer *s_steer; 372 struct mlx4_steer_index *entry = NULL, *tmp_entry; 373 u32 members_count; 374 bool ret = false; 375 376 if (port < 1 || port > dev->caps.num_ports) 377 return NULL; 378 379 s_steer = &mlx4_priv(dev)->steer[port - 1]; 380 381 if (!promisc_steering_entry(dev, port, steer, index, 382 tqpn, &members_count)) 383 goto out; 384 385 /* All the qps currently registered for this entry are promiscuous, 386 * Checking for duplicates */ 387 ret = true; 388 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 389 if (entry->index == index) { 390 if (list_empty(&entry->duplicates) || 391 members_count == 1) { 392 struct mlx4_promisc_qp *pqp, *tmp_pqp; 393 /* If there is only 1 entry in duplicates then 394 * this is the QP we want to delete, going over 395 * the list and deleting the entry. 396 */ 397 list_del(&entry->list); 398 list_for_each_entry_safe(pqp, tmp_pqp, 399 &entry->duplicates, 400 list) { 401 list_del(&pqp->list); 402 kfree(pqp); 403 } 404 kfree(entry); 405 } else { 406 /* This entry contains duplicates so it shouldn't be removed */ 407 ret = false; 408 goto out; 409 } 410 } 411 } 412 413 out: 414 return ret; 415 } 416 417 static int add_promisc_qp(struct mlx4_dev *dev, u8 port, 418 enum mlx4_steer_type steer, u32 qpn) 419 { 420 struct mlx4_steer *s_steer; 421 struct mlx4_cmd_mailbox *mailbox; 422 struct mlx4_mgm *mgm; 423 struct mlx4_steer_index *entry; 424 struct mlx4_promisc_qp *pqp; 425 struct mlx4_promisc_qp *dqp; 426 u32 members_count; 427 u32 prot; 428 int i; 429 bool found; 430 int err; 431 struct mlx4_priv *priv = mlx4_priv(dev); 432 433 if (port < 1 || port > dev->caps.num_ports) 434 return -EINVAL; 435 436 s_steer = &mlx4_priv(dev)->steer[port - 1]; 437 438 mutex_lock(&priv->mcg_table.mutex); 439 440 if (get_promisc_qp(dev, port, steer, qpn)) { 441 err = 0; /* Noting to do, already exists */ 442 goto out_mutex; 443 } 444 445 pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 446 if (!pqp) { 447 err = -ENOMEM; 448 goto out_mutex; 449 } 450 pqp->qpn = qpn; 451 452 mailbox = mlx4_alloc_cmd_mailbox(dev); 453 if (IS_ERR(mailbox)) { 454 err = -ENOMEM; 455 goto out_alloc; 456 } 457 mgm = mailbox->buf; 458 459 if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { 460 /* The promisc QP needs to be added for each one of the steering 461 * entries. If it already exists, needs to be added as 462 * a duplicate for this entry. 463 */ 464 list_for_each_entry(entry, 465 &s_steer->steer_entries[steer], 466 list) { 467 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 468 if (err) 469 goto out_mailbox; 470 471 members_count = be32_to_cpu(mgm->members_count) & 472 0xffffff; 473 prot = be32_to_cpu(mgm->members_count) >> 30; 474 found = false; 475 for (i = 0; i < members_count; i++) { 476 if ((be32_to_cpu(mgm->qp[i]) & 477 MGM_QPN_MASK) == qpn) { 478 /* Entry already exists. 479 * Add to duplicates. 480 */ 481 dqp = kmalloc(sizeof(*dqp), GFP_KERNEL); 482 if (!dqp) { 483 err = -ENOMEM; 484 goto out_mailbox; 485 } 486 dqp->qpn = qpn; 487 list_add_tail(&dqp->list, 488 &entry->duplicates); 489 found = true; 490 } 491 } 492 if (!found) { 493 /* Need to add the qpn to mgm */ 494 if (members_count == 495 dev->caps.num_qp_per_mgm) { 496 /* entry is full */ 497 err = -ENOMEM; 498 goto out_mailbox; 499 } 500 mgm->qp[members_count++] = 501 cpu_to_be32(qpn & MGM_QPN_MASK); 502 mgm->members_count = 503 cpu_to_be32(members_count | 504 (prot << 30)); 505 err = mlx4_WRITE_ENTRY(dev, entry->index, 506 mailbox); 507 if (err) 508 goto out_mailbox; 509 } 510 } 511 } 512 513 /* add the new qpn to list of promisc qps */ 514 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 515 /* now need to add all the promisc qps to default entry */ 516 memset(mgm, 0, sizeof *mgm); 517 members_count = 0; 518 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { 519 if (members_count == dev->caps.num_qp_per_mgm) { 520 /* entry is full */ 521 err = -ENOMEM; 522 goto out_list; 523 } 524 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 525 } 526 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 527 528 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 529 if (err) 530 goto out_list; 531 532 mlx4_free_cmd_mailbox(dev, mailbox); 533 mutex_unlock(&priv->mcg_table.mutex); 534 return 0; 535 536 out_list: 537 list_del(&pqp->list); 538 out_mailbox: 539 mlx4_free_cmd_mailbox(dev, mailbox); 540 out_alloc: 541 kfree(pqp); 542 out_mutex: 543 mutex_unlock(&priv->mcg_table.mutex); 544 return err; 545 } 546 547 static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, 548 enum mlx4_steer_type steer, u32 qpn) 549 { 550 struct mlx4_priv *priv = mlx4_priv(dev); 551 struct mlx4_steer *s_steer; 552 struct mlx4_cmd_mailbox *mailbox; 553 struct mlx4_mgm *mgm; 554 struct mlx4_steer_index *entry, *tmp_entry; 555 struct mlx4_promisc_qp *pqp; 556 struct mlx4_promisc_qp *dqp; 557 u32 members_count; 558 bool found; 559 bool back_to_list = false; 560 int i; 561 int err; 562 563 if (port < 1 || port > dev->caps.num_ports) 564 return -EINVAL; 565 566 s_steer = &mlx4_priv(dev)->steer[port - 1]; 567 mutex_lock(&priv->mcg_table.mutex); 568 569 pqp = get_promisc_qp(dev, port, steer, qpn); 570 if (unlikely(!pqp)) { 571 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 572 /* nothing to do */ 573 err = 0; 574 goto out_mutex; 575 } 576 577 /*remove from list of promisc qps */ 578 list_del(&pqp->list); 579 580 /* set the default entry not to include the removed one */ 581 mailbox = mlx4_alloc_cmd_mailbox(dev); 582 if (IS_ERR(mailbox)) { 583 err = -ENOMEM; 584 back_to_list = true; 585 goto out_list; 586 } 587 mgm = mailbox->buf; 588 members_count = 0; 589 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 590 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 591 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 592 593 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 594 if (err) 595 goto out_mailbox; 596 597 if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { 598 /* Remove the QP from all the steering entries */ 599 list_for_each_entry_safe(entry, tmp_entry, 600 &s_steer->steer_entries[steer], 601 list) { 602 found = false; 603 list_for_each_entry(dqp, &entry->duplicates, list) { 604 if (dqp->qpn == qpn) { 605 found = true; 606 break; 607 } 608 } 609 if (found) { 610 /* A duplicate, no need to change the MGM, 611 * only update the duplicates list 612 */ 613 list_del(&dqp->list); 614 kfree(dqp); 615 } else { 616 int loc = -1; 617 618 err = mlx4_READ_ENTRY(dev, 619 entry->index, 620 mailbox); 621 if (err) 622 goto out_mailbox; 623 members_count = 624 be32_to_cpu(mgm->members_count) & 625 0xffffff; 626 if (!members_count) { 627 mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0. deleting entry...\n", 628 qpn, entry->index); 629 list_del(&entry->list); 630 kfree(entry); 631 continue; 632 } 633 634 for (i = 0; i < members_count; ++i) 635 if ((be32_to_cpu(mgm->qp[i]) & 636 MGM_QPN_MASK) == qpn) { 637 loc = i; 638 break; 639 } 640 641 if (loc < 0) { 642 mlx4_err(dev, "QP %06x wasn't found in entry %d\n", 643 qpn, entry->index); 644 err = -EINVAL; 645 goto out_mailbox; 646 } 647 648 /* Copy the last QP in this MGM 649 * over removed QP 650 */ 651 mgm->qp[loc] = mgm->qp[members_count - 1]; 652 mgm->qp[members_count - 1] = 0; 653 mgm->members_count = 654 cpu_to_be32(--members_count | 655 (MLX4_PROT_ETH << 30)); 656 657 err = mlx4_WRITE_ENTRY(dev, 658 entry->index, 659 mailbox); 660 if (err) 661 goto out_mailbox; 662 } 663 } 664 } 665 666 out_mailbox: 667 mlx4_free_cmd_mailbox(dev, mailbox); 668 out_list: 669 if (back_to_list) 670 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 671 else 672 kfree(pqp); 673 out_mutex: 674 mutex_unlock(&priv->mcg_table.mutex); 675 return err; 676 } 677 678 /* 679 * Caller must hold MCG table semaphore. gid and mgm parameters must 680 * be properly aligned for command interface. 681 * 682 * Returns 0 unless a firmware command error occurs. 683 * 684 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 685 * and *mgm holds MGM entry. 686 * 687 * if GID is found in AMGM, *index = index in AMGM, *prev = index of 688 * previous entry in hash chain and *mgm holds AMGM entry. 689 * 690 * If no AMGM exists for given gid, *index = -1, *prev = index of last 691 * entry in hash chain and *mgm holds end of hash chain. 692 */ 693 static int find_entry(struct mlx4_dev *dev, u8 port, 694 u8 *gid, enum mlx4_protocol prot, 695 struct mlx4_cmd_mailbox *mgm_mailbox, 696 int *prev, int *index) 697 { 698 struct mlx4_cmd_mailbox *mailbox; 699 struct mlx4_mgm *mgm = mgm_mailbox->buf; 700 u8 *mgid; 701 int err; 702 u16 hash; 703 u8 op_mod = (prot == MLX4_PROT_ETH) ? 704 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; 705 706 mailbox = mlx4_alloc_cmd_mailbox(dev); 707 if (IS_ERR(mailbox)) 708 return -ENOMEM; 709 mgid = mailbox->buf; 710 711 memcpy(mgid, gid, 16); 712 713 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); 714 mlx4_free_cmd_mailbox(dev, mailbox); 715 if (err) 716 return err; 717 718 if (0) 719 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); 720 721 *index = hash; 722 *prev = -1; 723 724 do { 725 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 726 if (err) 727 return err; 728 729 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 730 if (*index != hash) { 731 mlx4_err(dev, "Found zero MGID in AMGM\n"); 732 err = -EINVAL; 733 } 734 return err; 735 } 736 737 if (!memcmp(mgm->gid, gid, 16) && 738 be32_to_cpu(mgm->members_count) >> 30 == prot) 739 return err; 740 741 *prev = *index; 742 *index = be32_to_cpu(mgm->next_gid_index) >> 6; 743 } while (*index); 744 745 *index = -1; 746 return err; 747 } 748 749 static const u8 __promisc_mode[] = { 750 [MLX4_FS_REGULAR] = 0x0, 751 [MLX4_FS_ALL_DEFAULT] = 0x1, 752 [MLX4_FS_MC_DEFAULT] = 0x3, 753 [MLX4_FS_MIRROR_RX_PORT] = 0x4, 754 [MLX4_FS_MIRROR_SX_PORT] = 0x5, 755 [MLX4_FS_UC_SNIFFER] = 0x6, 756 [MLX4_FS_MC_SNIFFER] = 0x7, 757 }; 758 759 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, 760 enum mlx4_net_trans_promisc_mode flow_type) 761 { 762 if (flow_type >= MLX4_FS_MODE_NUM) { 763 mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); 764 return -EINVAL; 765 } 766 return __promisc_mode[flow_type]; 767 } 768 EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); 769 770 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, 771 struct mlx4_net_trans_rule_hw_ctrl *hw) 772 { 773 u8 flags = 0; 774 775 flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; 776 flags |= ctrl->exclusive ? (1 << 2) : 0; 777 flags |= ctrl->allow_loopback ? (1 << 3) : 0; 778 779 hw->flags = flags; 780 hw->type = __promisc_mode[ctrl->promisc_mode]; 781 hw->prio = cpu_to_be16(ctrl->priority); 782 hw->port = ctrl->port; 783 hw->qpn = cpu_to_be32(ctrl->qpn); 784 } 785 786 const u16 __sw_id_hw[] = { 787 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, 788 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, 789 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, 790 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, 791 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, 792 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006, 793 [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008 794 }; 795 796 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, 797 enum mlx4_net_trans_rule_id id) 798 { 799 if (id >= MLX4_NET_TRANS_RULE_NUM) { 800 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 801 return -EINVAL; 802 } 803 return __sw_id_hw[id]; 804 } 805 EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); 806 807 static const int __rule_hw_sz[] = { 808 [MLX4_NET_TRANS_RULE_ID_ETH] = 809 sizeof(struct mlx4_net_trans_rule_hw_eth), 810 [MLX4_NET_TRANS_RULE_ID_IB] = 811 sizeof(struct mlx4_net_trans_rule_hw_ib), 812 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, 813 [MLX4_NET_TRANS_RULE_ID_IPV4] = 814 sizeof(struct mlx4_net_trans_rule_hw_ipv4), 815 [MLX4_NET_TRANS_RULE_ID_TCP] = 816 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 817 [MLX4_NET_TRANS_RULE_ID_UDP] = 818 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 819 [MLX4_NET_TRANS_RULE_ID_VXLAN] = 820 sizeof(struct mlx4_net_trans_rule_hw_vxlan) 821 }; 822 823 int mlx4_hw_rule_sz(struct mlx4_dev *dev, 824 enum mlx4_net_trans_rule_id id) 825 { 826 if (id >= MLX4_NET_TRANS_RULE_NUM) { 827 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 828 return -EINVAL; 829 } 830 831 return __rule_hw_sz[id]; 832 } 833 EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); 834 835 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, 836 struct _rule_hw *rule_hw) 837 { 838 if (mlx4_hw_rule_sz(dev, spec->id) < 0) 839 return -EINVAL; 840 memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); 841 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); 842 rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; 843 844 switch (spec->id) { 845 case MLX4_NET_TRANS_RULE_ID_ETH: 846 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); 847 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, 848 ETH_ALEN); 849 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); 850 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, 851 ETH_ALEN); 852 if (spec->eth.ether_type_enable) { 853 rule_hw->eth.ether_type_enable = 1; 854 rule_hw->eth.ether_type = spec->eth.ether_type; 855 } 856 rule_hw->eth.vlan_tag = spec->eth.vlan_id; 857 rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; 858 break; 859 860 case MLX4_NET_TRANS_RULE_ID_IB: 861 rule_hw->ib.l3_qpn = spec->ib.l3_qpn; 862 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; 863 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); 864 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); 865 break; 866 867 case MLX4_NET_TRANS_RULE_ID_IPV6: 868 return -EOPNOTSUPP; 869 870 case MLX4_NET_TRANS_RULE_ID_IPV4: 871 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; 872 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; 873 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; 874 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; 875 break; 876 877 case MLX4_NET_TRANS_RULE_ID_TCP: 878 case MLX4_NET_TRANS_RULE_ID_UDP: 879 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; 880 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; 881 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; 882 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; 883 break; 884 885 case MLX4_NET_TRANS_RULE_ID_VXLAN: 886 rule_hw->vxlan.vni = 887 cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8); 888 rule_hw->vxlan.vni_mask = 889 cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8); 890 break; 891 892 default: 893 return -EINVAL; 894 } 895 896 return __rule_hw_sz[spec->id]; 897 } 898 899 static void mlx4_err_rule(struct mlx4_dev *dev, char *str, 900 struct mlx4_net_trans_rule *rule) 901 { 902 #define BUF_SIZE 256 903 struct mlx4_spec_list *cur; 904 char buf[BUF_SIZE]; 905 int len = 0; 906 907 mlx4_err(dev, "%s", str); 908 len += snprintf(buf + len, BUF_SIZE - len, 909 "port = %d prio = 0x%x qp = 0x%x ", 910 rule->port, rule->priority, rule->qpn); 911 912 list_for_each_entry(cur, &rule->list, list) { 913 switch (cur->id) { 914 case MLX4_NET_TRANS_RULE_ID_ETH: 915 len += snprintf(buf + len, BUF_SIZE - len, 916 "dmac = %pM ", &cur->eth.dst_mac); 917 if (cur->eth.ether_type) 918 len += snprintf(buf + len, BUF_SIZE - len, 919 "ethertype = 0x%x ", 920 be16_to_cpu(cur->eth.ether_type)); 921 if (cur->eth.vlan_id) 922 len += snprintf(buf + len, BUF_SIZE - len, 923 "vlan-id = %d ", 924 be16_to_cpu(cur->eth.vlan_id)); 925 break; 926 927 case MLX4_NET_TRANS_RULE_ID_IPV4: 928 if (cur->ipv4.src_ip) 929 len += snprintf(buf + len, BUF_SIZE - len, 930 "src-ip = %pI4 ", 931 &cur->ipv4.src_ip); 932 if (cur->ipv4.dst_ip) 933 len += snprintf(buf + len, BUF_SIZE - len, 934 "dst-ip = %pI4 ", 935 &cur->ipv4.dst_ip); 936 break; 937 938 case MLX4_NET_TRANS_RULE_ID_TCP: 939 case MLX4_NET_TRANS_RULE_ID_UDP: 940 if (cur->tcp_udp.src_port) 941 len += snprintf(buf + len, BUF_SIZE - len, 942 "src-port = %d ", 943 be16_to_cpu(cur->tcp_udp.src_port)); 944 if (cur->tcp_udp.dst_port) 945 len += snprintf(buf + len, BUF_SIZE - len, 946 "dst-port = %d ", 947 be16_to_cpu(cur->tcp_udp.dst_port)); 948 break; 949 950 case MLX4_NET_TRANS_RULE_ID_IB: 951 len += snprintf(buf + len, BUF_SIZE - len, 952 "dst-gid = %pI6\n", cur->ib.dst_gid); 953 len += snprintf(buf + len, BUF_SIZE - len, 954 "dst-gid-mask = %pI6\n", 955 cur->ib.dst_gid_msk); 956 break; 957 958 case MLX4_NET_TRANS_RULE_ID_VXLAN: 959 len += snprintf(buf + len, BUF_SIZE - len, 960 "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); 961 break; 962 case MLX4_NET_TRANS_RULE_ID_IPV6: 963 break; 964 965 default: 966 break; 967 } 968 } 969 len += snprintf(buf + len, BUF_SIZE - len, "\n"); 970 mlx4_err(dev, "%s", buf); 971 972 if (len >= BUF_SIZE) 973 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n"); 974 } 975 976 int mlx4_flow_attach(struct mlx4_dev *dev, 977 struct mlx4_net_trans_rule *rule, u64 *reg_id) 978 { 979 struct mlx4_cmd_mailbox *mailbox; 980 struct mlx4_spec_list *cur; 981 u32 size = 0; 982 int ret; 983 984 mailbox = mlx4_alloc_cmd_mailbox(dev); 985 if (IS_ERR(mailbox)) 986 return PTR_ERR(mailbox); 987 988 trans_rule_ctrl_to_hw(rule, mailbox->buf); 989 990 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 991 992 list_for_each_entry(cur, &rule->list, list) { 993 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 994 if (ret < 0) { 995 mlx4_free_cmd_mailbox(dev, mailbox); 996 return ret; 997 } 998 size += ret; 999 } 1000 1001 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 1002 if (ret == -ENOMEM) { 1003 mlx4_err_rule(dev, 1004 "mcg table is full. Fail to register network rule\n", 1005 rule); 1006 } else if (ret) { 1007 if (ret == -ENXIO) { 1008 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) 1009 mlx4_err_rule(dev, 1010 "DMFS is not enabled, " 1011 "failed to register network rule.\n", 1012 rule); 1013 else 1014 mlx4_err_rule(dev, 1015 "Rule exceeds the dmfs_high_rate_mode limitations, " 1016 "failed to register network rule.\n", 1017 rule); 1018 1019 } else { 1020 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 1021 } 1022 } 1023 1024 mlx4_free_cmd_mailbox(dev, mailbox); 1025 1026 return ret; 1027 } 1028 EXPORT_SYMBOL_GPL(mlx4_flow_attach); 1029 1030 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) 1031 { 1032 int err; 1033 1034 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); 1035 if (err) 1036 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", 1037 reg_id); 1038 return err; 1039 } 1040 EXPORT_SYMBOL_GPL(mlx4_flow_detach); 1041 1042 int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, 1043 int port, int qpn, u16 prio, u64 *reg_id) 1044 { 1045 int err; 1046 struct mlx4_spec_list spec_eth_outer = { {NULL} }; 1047 struct mlx4_spec_list spec_vxlan = { {NULL} }; 1048 struct mlx4_spec_list spec_eth_inner = { {NULL} }; 1049 1050 struct mlx4_net_trans_rule rule = { 1051 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1052 .exclusive = 0, 1053 .allow_loopback = 1, 1054 .promisc_mode = MLX4_FS_REGULAR, 1055 }; 1056 1057 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1058 1059 rule.port = port; 1060 rule.qpn = qpn; 1061 rule.priority = prio; 1062 INIT_LIST_HEAD(&rule.list); 1063 1064 spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; 1065 memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); 1066 memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 1067 1068 spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ 1069 spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ 1070 1071 list_add_tail(&spec_eth_outer.list, &rule.list); 1072 list_add_tail(&spec_vxlan.list, &rule.list); 1073 list_add_tail(&spec_eth_inner.list, &rule.list); 1074 1075 err = mlx4_flow_attach(dev, &rule, reg_id); 1076 return err; 1077 } 1078 EXPORT_SYMBOL(mlx4_tunnel_steer_add); 1079 1080 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, 1081 u32 max_range_qpn) 1082 { 1083 int err; 1084 u64 in_param; 1085 1086 in_param = ((u64) min_range_qpn) << 32; 1087 in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; 1088 1089 err = mlx4_cmd(dev, in_param, 0, 0, 1090 MLX4_FLOW_STEERING_IB_UC_QP_RANGE, 1091 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1092 1093 return err; 1094 } 1095 EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); 1096 1097 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1098 int block_mcast_loopback, enum mlx4_protocol prot, 1099 enum mlx4_steer_type steer) 1100 { 1101 struct mlx4_priv *priv = mlx4_priv(dev); 1102 struct mlx4_cmd_mailbox *mailbox; 1103 struct mlx4_mgm *mgm; 1104 u32 members_count; 1105 int index = -1, prev; 1106 int link = 0; 1107 int i; 1108 int err; 1109 u8 port = gid[5]; 1110 u8 new_entry = 0; 1111 1112 mailbox = mlx4_alloc_cmd_mailbox(dev); 1113 if (IS_ERR(mailbox)) 1114 return PTR_ERR(mailbox); 1115 mgm = mailbox->buf; 1116 1117 mutex_lock(&priv->mcg_table.mutex); 1118 err = find_entry(dev, port, gid, prot, 1119 mailbox, &prev, &index); 1120 if (err) 1121 goto out; 1122 1123 if (index != -1) { 1124 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 1125 new_entry = 1; 1126 memcpy(mgm->gid, gid, 16); 1127 } 1128 } else { 1129 link = 1; 1130 1131 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); 1132 if (index == -1) { 1133 mlx4_err(dev, "No AMGM entries left\n"); 1134 err = -ENOMEM; 1135 goto out; 1136 } 1137 index += dev->caps.num_mgms; 1138 1139 new_entry = 1; 1140 memset(mgm, 0, sizeof *mgm); 1141 memcpy(mgm->gid, gid, 16); 1142 } 1143 1144 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1145 if (members_count == dev->caps.num_qp_per_mgm) { 1146 mlx4_err(dev, "MGM at index %x is full\n", index); 1147 err = -ENOMEM; 1148 goto out; 1149 } 1150 1151 for (i = 0; i < members_count; ++i) 1152 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 1153 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); 1154 err = 0; 1155 goto out; 1156 } 1157 1158 if (block_mcast_loopback) 1159 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | 1160 (1U << MGM_BLCK_LB_BIT)); 1161 else 1162 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 1163 1164 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 1165 1166 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1167 if (err) 1168 goto out; 1169 1170 if (!link) 1171 goto out; 1172 1173 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1174 if (err) 1175 goto out; 1176 1177 mgm->next_gid_index = cpu_to_be32(index << 6); 1178 1179 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1180 if (err) 1181 goto out; 1182 1183 out: 1184 if (prot == MLX4_PROT_ETH && index != -1) { 1185 /* manage the steering entry for promisc mode */ 1186 if (new_entry) 1187 err = new_steering_entry(dev, port, steer, 1188 index, qp->qpn); 1189 else 1190 err = existing_steering_entry(dev, port, steer, 1191 index, qp->qpn); 1192 } 1193 if (err && link && index != -1) { 1194 if (index < dev->caps.num_mgms) 1195 mlx4_warn(dev, "Got AMGM index %d < %d\n", 1196 index, dev->caps.num_mgms); 1197 else 1198 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1199 index - dev->caps.num_mgms, MLX4_USE_RR); 1200 } 1201 mutex_unlock(&priv->mcg_table.mutex); 1202 1203 mlx4_free_cmd_mailbox(dev, mailbox); 1204 return err; 1205 } 1206 1207 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1208 enum mlx4_protocol prot, enum mlx4_steer_type steer) 1209 { 1210 struct mlx4_priv *priv = mlx4_priv(dev); 1211 struct mlx4_cmd_mailbox *mailbox; 1212 struct mlx4_mgm *mgm; 1213 u32 members_count; 1214 int prev, index; 1215 int i, loc = -1; 1216 int err; 1217 u8 port = gid[5]; 1218 bool removed_entry = false; 1219 1220 mailbox = mlx4_alloc_cmd_mailbox(dev); 1221 if (IS_ERR(mailbox)) 1222 return PTR_ERR(mailbox); 1223 mgm = mailbox->buf; 1224 1225 mutex_lock(&priv->mcg_table.mutex); 1226 1227 err = find_entry(dev, port, gid, prot, 1228 mailbox, &prev, &index); 1229 if (err) 1230 goto out; 1231 1232 if (index == -1) { 1233 mlx4_err(dev, "MGID %pI6 not found\n", gid); 1234 err = -EINVAL; 1235 goto out; 1236 } 1237 1238 /* If this QP is also a promisc QP, it shouldn't be removed only if 1239 * at least one none promisc QP is also attached to this MCG 1240 */ 1241 if (prot == MLX4_PROT_ETH && 1242 check_duplicate_entry(dev, port, steer, index, qp->qpn) && 1243 !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL)) 1244 goto out; 1245 1246 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1247 for (i = 0; i < members_count; ++i) 1248 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 1249 loc = i; 1250 break; 1251 } 1252 1253 if (loc == -1) { 1254 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); 1255 err = -EINVAL; 1256 goto out; 1257 } 1258 1259 /* copy the last QP in this MGM over removed QP */ 1260 mgm->qp[loc] = mgm->qp[members_count - 1]; 1261 mgm->qp[members_count - 1] = 0; 1262 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 1263 1264 if (prot == MLX4_PROT_ETH) 1265 removed_entry = can_remove_steering_entry(dev, port, steer, 1266 index, qp->qpn); 1267 if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) { 1268 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1269 goto out; 1270 } 1271 1272 /* We are going to delete the entry, members count should be 0 */ 1273 mgm->members_count = cpu_to_be32((u32) prot << 30); 1274 1275 if (prev == -1) { 1276 /* Remove entry from MGM */ 1277 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1278 if (amgm_index) { 1279 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 1280 if (err) 1281 goto out; 1282 } else 1283 memset(mgm->gid, 0, 16); 1284 1285 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1286 if (err) 1287 goto out; 1288 1289 if (amgm_index) { 1290 if (amgm_index < dev->caps.num_mgms) 1291 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n", 1292 index, amgm_index, dev->caps.num_mgms); 1293 else 1294 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1295 amgm_index - dev->caps.num_mgms, MLX4_USE_RR); 1296 } 1297 } else { 1298 /* Remove entry from AMGM */ 1299 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1300 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1301 if (err) 1302 goto out; 1303 1304 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 1305 1306 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1307 if (err) 1308 goto out; 1309 1310 if (index < dev->caps.num_mgms) 1311 mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n", 1312 prev, index, dev->caps.num_mgms); 1313 else 1314 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1315 index - dev->caps.num_mgms, MLX4_USE_RR); 1316 } 1317 1318 out: 1319 mutex_unlock(&priv->mcg_table.mutex); 1320 1321 mlx4_free_cmd_mailbox(dev, mailbox); 1322 if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) 1323 /* In case device is under an error, return success as a closing command */ 1324 err = 0; 1325 return err; 1326 } 1327 1328 static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, 1329 u8 gid[16], u8 attach, u8 block_loopback, 1330 enum mlx4_protocol prot) 1331 { 1332 struct mlx4_cmd_mailbox *mailbox; 1333 int err = 0; 1334 int qpn; 1335 1336 if (!mlx4_is_mfunc(dev)) 1337 return -EBADF; 1338 1339 mailbox = mlx4_alloc_cmd_mailbox(dev); 1340 if (IS_ERR(mailbox)) 1341 return PTR_ERR(mailbox); 1342 1343 memcpy(mailbox->buf, gid, 16); 1344 qpn = qp->qpn; 1345 qpn |= (prot << 28); 1346 if (attach && block_loopback) 1347 qpn |= (1 << 31); 1348 1349 err = mlx4_cmd(dev, mailbox->dma, qpn, attach, 1350 MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, 1351 MLX4_CMD_WRAPPED); 1352 1353 mlx4_free_cmd_mailbox(dev, mailbox); 1354 if (err && !attach && 1355 dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) 1356 err = 0; 1357 return err; 1358 } 1359 1360 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1361 u8 gid[16], u8 port, 1362 int block_mcast_loopback, 1363 enum mlx4_protocol prot, u64 *reg_id) 1364 { 1365 struct mlx4_spec_list spec = { {NULL} }; 1366 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1367 1368 struct mlx4_net_trans_rule rule = { 1369 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1370 .exclusive = 0, 1371 .promisc_mode = MLX4_FS_REGULAR, 1372 .priority = MLX4_DOMAIN_NIC, 1373 }; 1374 1375 rule.allow_loopback = !block_mcast_loopback; 1376 rule.port = port; 1377 rule.qpn = qp->qpn; 1378 INIT_LIST_HEAD(&rule.list); 1379 1380 switch (prot) { 1381 case MLX4_PROT_ETH: 1382 spec.id = MLX4_NET_TRANS_RULE_ID_ETH; 1383 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); 1384 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 1385 break; 1386 1387 case MLX4_PROT_IB_IPV6: 1388 spec.id = MLX4_NET_TRANS_RULE_ID_IB; 1389 memcpy(spec.ib.dst_gid, gid, 16); 1390 memset(&spec.ib.dst_gid_msk, 0xff, 16); 1391 break; 1392 default: 1393 return -EINVAL; 1394 } 1395 list_add_tail(&spec.list, &rule.list); 1396 1397 return mlx4_flow_attach(dev, &rule, reg_id); 1398 } 1399 1400 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1401 u8 port, int block_mcast_loopback, 1402 enum mlx4_protocol prot, u64 *reg_id) 1403 { 1404 switch (dev->caps.steering_mode) { 1405 case MLX4_STEERING_MODE_A0: 1406 if (prot == MLX4_PROT_ETH) 1407 return 0; 1408 1409 case MLX4_STEERING_MODE_B0: 1410 if (prot == MLX4_PROT_ETH) 1411 gid[7] |= (MLX4_MC_STEER << 1); 1412 1413 if (mlx4_is_mfunc(dev)) 1414 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1415 block_mcast_loopback, prot); 1416 return mlx4_qp_attach_common(dev, qp, gid, 1417 block_mcast_loopback, prot, 1418 MLX4_MC_STEER); 1419 1420 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1421 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 1422 block_mcast_loopback, 1423 prot, reg_id); 1424 default: 1425 return -EINVAL; 1426 } 1427 } 1428 EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1429 1430 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1431 enum mlx4_protocol prot, u64 reg_id) 1432 { 1433 switch (dev->caps.steering_mode) { 1434 case MLX4_STEERING_MODE_A0: 1435 if (prot == MLX4_PROT_ETH) 1436 return 0; 1437 1438 case MLX4_STEERING_MODE_B0: 1439 if (prot == MLX4_PROT_ETH) 1440 gid[7] |= (MLX4_MC_STEER << 1); 1441 1442 if (mlx4_is_mfunc(dev)) 1443 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1444 1445 return mlx4_qp_detach_common(dev, qp, gid, prot, 1446 MLX4_MC_STEER); 1447 1448 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1449 return mlx4_flow_detach(dev, reg_id); 1450 1451 default: 1452 return -EINVAL; 1453 } 1454 } 1455 EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1456 1457 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1458 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1459 { 1460 struct mlx4_net_trans_rule rule = { 1461 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1462 .exclusive = 0, 1463 .allow_loopback = 1, 1464 }; 1465 1466 u64 *regid_p; 1467 1468 switch (mode) { 1469 case MLX4_FS_ALL_DEFAULT: 1470 regid_p = &dev->regid_promisc_array[port]; 1471 break; 1472 case MLX4_FS_MC_DEFAULT: 1473 regid_p = &dev->regid_allmulti_array[port]; 1474 break; 1475 default: 1476 return -1; 1477 } 1478 1479 if (*regid_p != 0) 1480 return -1; 1481 1482 rule.promisc_mode = mode; 1483 rule.port = port; 1484 rule.qpn = qpn; 1485 INIT_LIST_HEAD(&rule.list); 1486 mlx4_err(dev, "going promisc on %x\n", port); 1487 1488 return mlx4_flow_attach(dev, &rule, regid_p); 1489 } 1490 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); 1491 1492 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, 1493 enum mlx4_net_trans_promisc_mode mode) 1494 { 1495 int ret; 1496 u64 *regid_p; 1497 1498 switch (mode) { 1499 case MLX4_FS_ALL_DEFAULT: 1500 regid_p = &dev->regid_promisc_array[port]; 1501 break; 1502 case MLX4_FS_MC_DEFAULT: 1503 regid_p = &dev->regid_allmulti_array[port]; 1504 break; 1505 default: 1506 return -1; 1507 } 1508 1509 if (*regid_p == 0) 1510 return -1; 1511 1512 ret = mlx4_flow_detach(dev, *regid_p); 1513 if (ret == 0) 1514 *regid_p = 0; 1515 1516 return ret; 1517 } 1518 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); 1519 1520 int mlx4_unicast_attach(struct mlx4_dev *dev, 1521 struct mlx4_qp *qp, u8 gid[16], 1522 int block_mcast_loopback, enum mlx4_protocol prot) 1523 { 1524 if (prot == MLX4_PROT_ETH) 1525 gid[7] |= (MLX4_UC_STEER << 1); 1526 1527 if (mlx4_is_mfunc(dev)) 1528 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1529 block_mcast_loopback, prot); 1530 1531 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1532 prot, MLX4_UC_STEER); 1533 } 1534 EXPORT_SYMBOL_GPL(mlx4_unicast_attach); 1535 1536 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1537 u8 gid[16], enum mlx4_protocol prot) 1538 { 1539 if (prot == MLX4_PROT_ETH) 1540 gid[7] |= (MLX4_UC_STEER << 1); 1541 1542 if (mlx4_is_mfunc(dev)) 1543 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1544 1545 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); 1546 } 1547 EXPORT_SYMBOL_GPL(mlx4_unicast_detach); 1548 1549 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1550 struct mlx4_vhcr *vhcr, 1551 struct mlx4_cmd_mailbox *inbox, 1552 struct mlx4_cmd_mailbox *outbox, 1553 struct mlx4_cmd_info *cmd) 1554 { 1555 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1556 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62); 1557 enum mlx4_steer_type steer = vhcr->in_modifier; 1558 1559 if (port < 0) 1560 return -EINVAL; 1561 1562 /* Promiscuous unicast is not allowed in mfunc */ 1563 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1564 return 0; 1565 1566 if (vhcr->op_modifier) 1567 return add_promisc_qp(dev, port, steer, qpn); 1568 else 1569 return remove_promisc_qp(dev, port, steer, qpn); 1570 } 1571 1572 static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, 1573 enum mlx4_steer_type steer, u8 add, u8 port) 1574 { 1575 return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, 1576 MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, 1577 MLX4_CMD_WRAPPED); 1578 } 1579 1580 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1581 { 1582 if (mlx4_is_mfunc(dev)) 1583 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1584 1585 return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1586 } 1587 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); 1588 1589 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1590 { 1591 if (mlx4_is_mfunc(dev)) 1592 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1593 1594 return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1595 } 1596 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); 1597 1598 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1599 { 1600 if (mlx4_is_mfunc(dev)) 1601 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1602 1603 return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1604 } 1605 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); 1606 1607 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1608 { 1609 if (mlx4_is_mfunc(dev)) 1610 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1611 1612 return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1613 } 1614 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); 1615 1616 int mlx4_init_mcg_table(struct mlx4_dev *dev) 1617 { 1618 struct mlx4_priv *priv = mlx4_priv(dev); 1619 int err; 1620 1621 /* No need for mcg_table when fw managed the mcg table*/ 1622 if (dev->caps.steering_mode == 1623 MLX4_STEERING_MODE_DEVICE_MANAGED) 1624 return 0; 1625 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1626 dev->caps.num_amgms - 1, 0, 0); 1627 if (err) 1628 return err; 1629 1630 mutex_init(&priv->mcg_table.mutex); 1631 1632 return 0; 1633 } 1634 1635 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1636 { 1637 if (dev->caps.steering_mode != 1638 MLX4_STEERING_MODE_DEVICE_MANAGED) 1639 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1640 } 1641