/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/export.h>

#include "mlx4.h"

static const u8 zero_gid[16];	/* automatically initialized to 0 */

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return 1 << dev->oper_log_mgm_entry_size;
}

int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}

static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size,
					u64 *reg_id)
{
	u64 imm;
	int err = 0;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;
	*reg_id = imm;

	return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err = 0;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}

static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}

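/*
 * Layout notes, inferred from how the struct mlx4_mgm fields are used
 * throughout this file: members_count keeps the number of attached
 * QPs in its low 24 bits and the mlx4_protocol value in bits 30-31;
 * each qp[] slot carries a QPN masked by MGM_QPN_MASK, optionally
 * OR'd with the block-loopback bit (MGM_BLCK_LB_BIT); next_gid_index
 * stores the next AMGM index of the hash chain, shifted left by 6.
 */
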
static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_promisc_qp *pqp;

	if (port < 1 || port > dev->caps.num_ports)
		return NULL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}

/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}

/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn
	 * we need to add it as a duplicate to this entry
	 * for future references */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}

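/*
 * Illustration of the duplicate bookkeeping (hypothetical QPNs): if
 * QP 0x40 is promiscuous and is then attached to a MAC as a regular
 * member, the steering entry records 0x40 on its 'duplicates' list.
 * On detach from the MAC, check_duplicate_entry() drops the duplicate
 * record but keeps 0x40 in the MGM entry, since as a promisc QP it
 * must still receive that traffic.  Conversely, remove_promisc_qp()
 * keeps a duplicated QP in the MGM (it is still a regular member)
 * and only deletes the duplicate record.
 */
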
/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, port, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}

/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous,
	 * Checking for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

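/*
 * Summary of the flow below (as implemented in this file): to make a
 * QP promiscuous for a <port, steer> pair, it is appended to every
 * existing steering entry (recording a duplicate where it is already
 * a regular member), added to the promisc_qps list, and finally the
 * per-port default entry is rewritten to hold all promisc QPs.
 */
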
static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, port, steer, qpn)) {
		err = 0;  /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	/* the promisc qp needs to be added for each one of the steering
	 * entries, if it already exists, needs to be added as a duplicate
	 * for this entry */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
		if (err)
			goto out_mailbox;

		members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
		prot = be32_to_cpu(mgm->members_count) >> 30;
		found = false;
		for (i = 0; i < members_count; i++) {
			if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
				/* Entry already exists, add to duplicates */
				dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
				if (!dqp) {
					err = -ENOMEM;
					goto out_mailbox;
				}
				dqp->qpn = qpn;
				list_add_tail(&dqp->list, &entry->duplicates);
				found = true;
			}
		}
		if (!found) {
			/* Need to add the qpn to mgm */
			if (members_count == dev->caps.num_qp_per_mgm) {
				/* entry is full */
				err = -ENOMEM;
				goto out_mailbox;
			}
			mgm->qp[members_count++] =
				cpu_to_be32(qpn & MGM_QPN_MASK);
			mgm->members_count = cpu_to_be32(members_count | (prot << 30));
			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

	/* add the new qpn to list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to default entry */
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

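/*
 * Reverse of add_promisc_qp(): drop the QP from the promisc_qps list,
 * rewrite the per-port default entry without it, and then remove it
 * from every steering entry where it is not also a regular member
 * (i.e. not recorded as a duplicate).
 */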
static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int loc, i;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	/* remove the qp from all the steering entries */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		found = false;
		list_for_each_entry(dqp, &entry->duplicates, list) {
			if (dqp->qpn == qpn) {
				found = true;
				break;
			}
		}
		if (found) {
			/* a duplicate, no need to change the mgm,
			 * only update the duplicates list */
			list_del(&dqp->list);
			kfree(dqp);
		} else {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
			members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
			for (loc = -1, i = 0; i < members_count; ++i)
				if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
					loc = i;

			mgm->members_count = cpu_to_be32(--members_count |
							 (MLX4_PROT_ETH << 30));
			mgm->qp[loc] = mgm->qp[i - 1];
			mgm->qp[i - 1] = 0;

			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

/*
 * Caller must hold MCG table semaphore.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}

static const u8 __promisc_mode[] = {
	[MLX4_FS_REGULAR]     = 0x0,
	[MLX4_FS_ALL_DEFAULT] = 0x1,
	[MLX4_FS_MC_DEFAULT]  = 0x3,
	[MLX4_FS_UC_SNIFFER]  = 0x4,
	[MLX4_FS_MC_SNIFFER]  = 0x5,
};

int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
				    enum mlx4_net_trans_promisc_mode flow_type)
{
	if (flow_type >= MLX4_FS_MODE_NUM) {
		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
		return -EINVAL;
	}
	return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);

static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	u8 flags = 0;

	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	flags |= ctrl->exclusive ? (1 << 2) : 0;
	flags |= ctrl->allow_loopback ? (1 << 3) : 0;

	hw->flags = flags;
	hw->type = __promisc_mode[ctrl->promisc_mode];
	hw->prio = cpu_to_be16(ctrl->priority);
	hw->port = ctrl->port;
	hw->qpn = cpu_to_be32(ctrl->qpn);
}

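/*
 * A device-managed flow rule is passed to firmware as one mailbox
 * blob: a struct mlx4_net_trans_rule_hw_ctrl header followed by one
 * hardware spec segment per mlx4_spec_list element.  Each segment
 * starts with the hardware id from __sw_id_hw below and carries its
 * own size in 4-byte units; the total blob size is likewise handed to
 * MLX4_QP_FLOW_STEERING_ATTACH in 4-byte units (see mlx4_flow_attach).
 */
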
const u16 __sw_id_hw[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH]     = 0xE001,
	[MLX4_NET_TRANS_RULE_ID_IB]      = 0xE005,
	[MLX4_NET_TRANS_RULE_ID_IPV6]    = 0xE003,
	[MLX4_NET_TRANS_RULE_ID_IPV4]    = 0xE002,
	[MLX4_NET_TRANS_RULE_ID_TCP]     = 0xE004,
	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006,
	[MLX4_NET_TRANS_RULE_ID_VXLAN]   = 0xE008
};

int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
				  enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}
	return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);

static const int __rule_hw_sz[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH] =
		sizeof(struct mlx4_net_trans_rule_hw_eth),
	[MLX4_NET_TRANS_RULE_ID_IB] =
		sizeof(struct mlx4_net_trans_rule_hw_ib),
	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
	[MLX4_NET_TRANS_RULE_ID_IPV4] =
		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
	[MLX4_NET_TRANS_RULE_ID_TCP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_UDP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_VXLAN] =
		sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};

int mlx4_hw_rule_sz(struct mlx4_dev *dev,
		    enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}

	return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);

static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
		return -EINVAL;
	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_VXLAN:
		rule_hw->vxlan.vni =
			cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
		rule_hw->vxlan.vni_mask =
			cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}

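/*
 * Note on parse_trans_rule() above: its return value is the number of
 * bytes consumed, which lets mlx4_flow_attach() pack spec segments
 * back-to-back after the control header.  The VXLAN VNI is shifted
 * left by 8 before the endianness conversion, presumably so the
 * 24-bit VNI lands in the upper three bytes of the big-endian word,
 * mirroring the on-wire VXLAN header layout.
 */
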
static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
	struct mlx4_spec_list *cur;
	char buf[BUF_SIZE];
	int len = 0;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = %pM ", &cur->eth.dst_mac);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = %pI6\n", cur->ib.dst_gid);
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = %pI6\n",
					cur->ib.dst_gid_msk);
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
}

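/*
 * Sketch of typical mlx4_flow_attach() usage (hypothetical values,
 * modelled on mlx4_trans_to_dmfs_attach() below): steer TCP traffic
 * to destination port 80 on port 1 into a given QP when the device
 * runs in device-managed steering mode.
 *
 *	struct mlx4_spec_list spec = {
 *		.id = MLX4_NET_TRANS_RULE_ID_TCP,
 *		.tcp_udp.dst_port = cpu_to_be16(80),
 *		.tcp_udp.dst_port_msk = cpu_to_be16(0xffff),
 *	};
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 *		.exclusive = 0,
 *		.allow_loopback = 1,
 *		.promisc_mode = MLX4_FS_REGULAR,
 *		.priority = MLX4_DOMAIN_NIC,
 *		.port = 1,
 *		.qpn = qpn,
 *	};
 *	u64 reg_id;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	list_add_tail(&spec.list, &rule.list);
 *	err = mlx4_flow_attach(dev, &rule, &reg_id);
 *	...
 *	err = mlx4_flow_detach(dev, reg_id);
 */
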
int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	u32 size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM)
		mlx4_err_rule(dev,
			      "mcg table is full. Fail to register network rule.\n",
			      rule);
	else if (ret)
		mlx4_err_rule(dev, "Fail to register network rule.\n", rule);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);

int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
			 reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);

int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
				      u32 max_range_qpn)
{
	int err;
	u64 in_param;

	in_param = ((u64) min_range_qpn) << 32;
	in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;

	err = mlx4_cmd(dev, in_param, 0, 0,
		       MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);

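/*
 * B0/A0 steering attach path: the GID is hashed by firmware into an
 * MGM table index; hash collisions extend the chain into the AMGM
 * area (see find_entry() above).  mlx4_qp_attach_common() adds the
 * QP to the matching entry, allocating a new AMGM entry and linking
 * it at the end of the chain when the GID is not present yet.
 */
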
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this qp is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

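/*
 * In multi-function (SR-IOV) mode attach/detach requests are proxied
 * to the PF through the wrapped MLX4_CMD_QP_ATTACH command.  The
 * in_modifier packs the QPN together with the protocol (prot << 28)
 * and, on attach, the block-loopback flag in bit 31; op_modifier
 * carries the attach (1) / detach (0) flag.
 */
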
static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	rule.allow_loopback = !block_mcast_loopback;
	rule.port = port;
	rule.qpn = qp->qpn;
	INIT_LIST_HEAD(&rule.list);

	switch (prot) {
	case MLX4_PROT_ETH:
		spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
		memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		break;

	case MLX4_PROT_IB_IPV6:
		spec.id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec.ib.dst_gid, gid, 16);
		memset(&spec.ib.dst_gid_msk, 0xff, 16);
		break;
	default:
		return -EINVAL;
	}
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_mcast_loopback,
						 prot, reg_id);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);

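/*
 * In device-managed steering mode, port-wide promiscuity is itself a
 * flow rule; the registration id returned by mlx4_flow_attach() is
 * cached per port in dev->regid_promisc_array (ALL_DEFAULT) or
 * dev->regid_allmulti_array (MC_DEFAULT) so it can be detached later.
 */
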
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
	};
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_err(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);

int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);

int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
	enum mlx4_steer_type steer = vhcr->in_modifier;

	if (port < 0)
		return -EINVAL;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

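/*
 * Public promisc helpers for B0 steering: in multi-function mode the
 * request goes through the wrapped PROMISC command (the port travels
 * in bits 62-63 of in_param, see mlx4_PROMISC_wrapper() above);
 * otherwise the steering tables are manipulated directly.
 */
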
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when fw managed the mcg table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;
	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}