1 /* 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/string.h> 35 #include <linux/etherdevice.h> 36 37 #include <linux/mlx4/cmd.h> 38 #include <linux/export.h> 39 40 #include "mlx4.h" 41 42 #define MGM_QPN_MASK 0x00FFFFFF 43 #define MGM_BLCK_LB_BIT 30 44 45 static const u8 zero_gid[16]; /* automatically initialized to 0 */ 46 47 struct mlx4_mgm { 48 __be32 next_gid_index; 49 __be32 members_count; 50 u32 reserved[2]; 51 u8 gid[16]; 52 __be32 qp[MLX4_MAX_QP_PER_MGM]; 53 }; 54 55 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 56 { 57 return 1 << dev->oper_log_mgm_entry_size; 58 } 59 60 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 61 { 62 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 63 } 64 65 static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, 66 struct mlx4_cmd_mailbox *mailbox, 67 u32 size, 68 u64 *reg_id) 69 { 70 u64 imm; 71 int err = 0; 72 73 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, 74 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 75 MLX4_CMD_NATIVE); 76 if (err) 77 return err; 78 *reg_id = imm; 79 80 return err; 81 } 82 83 static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) 84 { 85 int err = 0; 86 87 err = mlx4_cmd(dev, regid, 0, 0, 88 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 89 MLX4_CMD_NATIVE); 90 91 return err; 92 } 93 94 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 95 struct mlx4_cmd_mailbox *mailbox) 96 { 97 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 98 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 99 } 100 101 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 102 struct mlx4_cmd_mailbox *mailbox) 103 { 104 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 105 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 106 } 107 108 static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, 109 struct mlx4_cmd_mailbox *mailbox) 110 { 111 u32 in_mod; 112 113 in_mod = (u32) port << 16 | steer << 1; 114 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 115 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, 116 MLX4_CMD_NATIVE); 117 } 118 119 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 120 u16 *hash, u8 op_mod) 121 { 122 u64 imm; 123 int err; 124 125 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 126 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, 127 MLX4_CMD_NATIVE); 128 129 if (!err) 130 *hash = imm; 131 132 return err; 133 } 134 135 static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, 136 enum mlx4_steer_type steer, 137 u32 qpn) 138 { 139 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1]; 140 struct mlx4_promisc_qp *pqp; 141 142 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 143 if (pqp->qpn == qpn) 144 return pqp; 145 } 146 /* not found */ 147 return NULL; 148 } 149 150 /* 151 * Add new entry to steering data structure. 152 * All promisc QPs should be added as well 153 */ 154 static int new_steering_entry(struct mlx4_dev *dev, u8 port, 155 enum mlx4_steer_type steer, 156 unsigned int index, u32 qpn) 157 { 158 struct mlx4_steer *s_steer; 159 struct mlx4_cmd_mailbox *mailbox; 160 struct mlx4_mgm *mgm; 161 u32 members_count; 162 struct mlx4_steer_index *new_entry; 163 struct mlx4_promisc_qp *pqp; 164 struct mlx4_promisc_qp *dqp = NULL; 165 u32 prot; 166 int err; 167 168 s_steer = &mlx4_priv(dev)->steer[port - 1]; 169 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 170 if (!new_entry) 171 return -ENOMEM; 172 173 INIT_LIST_HEAD(&new_entry->duplicates); 174 new_entry->index = index; 175 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 176 177 /* If the given qpn is also a promisc qp, 178 * it should be inserted to duplicates list 179 */ 180 pqp = get_promisc_qp(dev, port, steer, qpn); 181 if (pqp) { 182 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 183 if (!dqp) { 184 err = -ENOMEM; 185 goto out_alloc; 186 } 187 dqp->qpn = qpn; 188 list_add_tail(&dqp->list, &new_entry->duplicates); 189 } 190 191 /* if no promisc qps for this vep, we are done */ 192 if (list_empty(&s_steer->promisc_qps[steer])) 193 return 0; 194 195 /* now need to add all the promisc qps to the new 196 * steering entry, as they should also receive the packets 197 * destined to this address */ 198 mailbox = mlx4_alloc_cmd_mailbox(dev); 199 if (IS_ERR(mailbox)) { 200 err = -ENOMEM; 201 goto out_alloc; 202 } 203 mgm = mailbox->buf; 204 205 err = mlx4_READ_ENTRY(dev, index, mailbox); 206 if (err) 207 goto out_mailbox; 208 209 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 210 prot = be32_to_cpu(mgm->members_count) >> 30; 211 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 212 /* don't add already existing qpn */ 213 if (pqp->qpn == qpn) 214 continue; 215 if (members_count == dev->caps.num_qp_per_mgm) { 216 /* out of space */ 217 err = -ENOMEM; 218 goto out_mailbox; 219 } 220 221 /* add the qpn */ 222 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 223 } 224 /* update the qps count and update the entry with all the promisc qps*/ 225 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 226 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 227 228 out_mailbox: 229 mlx4_free_cmd_mailbox(dev, mailbox); 230 if (!err) 231 return 0; 232 out_alloc: 233 if (dqp) { 234 list_del(&dqp->list); 235 kfree(dqp); 236 } 237 list_del(&new_entry->list); 238 kfree(new_entry); 239 return err; 240 } 241 242 /* update the data structures with existing steering entry */ 243 static int existing_steering_entry(struct mlx4_dev *dev, u8 port, 244 enum mlx4_steer_type steer, 245 unsigned int index, u32 qpn) 246 { 247 struct mlx4_steer *s_steer; 248 struct mlx4_steer_index *tmp_entry, *entry = NULL; 249 struct mlx4_promisc_qp *pqp; 250 struct mlx4_promisc_qp *dqp; 251 252 s_steer = &mlx4_priv(dev)->steer[port - 1]; 253 254 pqp = get_promisc_qp(dev, port, steer, qpn); 255 if (!pqp) 256 return 0; /* nothing to do */ 257 258 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 259 if (tmp_entry->index == index) { 260 entry = tmp_entry; 261 break; 262 } 263 } 264 if (unlikely(!entry)) { 265 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 266 return -EINVAL; 267 } 268 269 /* the given qpn is listed as a promisc qpn 270 * we need to add it as a duplicate to this entry 271 * for future references */ 272 list_for_each_entry(dqp, &entry->duplicates, list) { 273 if (qpn == pqp->qpn) 274 return 0; /* qp is already duplicated */ 275 } 276 277 /* add the qp as a duplicate on this index */ 278 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 279 if (!dqp) 280 return -ENOMEM; 281 dqp->qpn = qpn; 282 list_add_tail(&dqp->list, &entry->duplicates); 283 284 return 0; 285 } 286 287 /* Check whether a qpn is a duplicate on steering entry 288 * If so, it should not be removed from mgm */ 289 static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, 290 enum mlx4_steer_type steer, 291 unsigned int index, u32 qpn) 292 { 293 struct mlx4_steer *s_steer; 294 struct mlx4_steer_index *tmp_entry, *entry = NULL; 295 struct mlx4_promisc_qp *dqp, *tmp_dqp; 296 297 s_steer = &mlx4_priv(dev)->steer[port - 1]; 298 299 /* if qp is not promisc, it cannot be duplicated */ 300 if (!get_promisc_qp(dev, port, steer, qpn)) 301 return false; 302 303 /* The qp is promisc qp so it is a duplicate on this index 304 * Find the index entry, and remove the duplicate */ 305 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 306 if (tmp_entry->index == index) { 307 entry = tmp_entry; 308 break; 309 } 310 } 311 if (unlikely(!entry)) { 312 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 313 return false; 314 } 315 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 316 if (dqp->qpn == qpn) { 317 list_del(&dqp->list); 318 kfree(dqp); 319 } 320 } 321 return true; 322 } 323 324 /* I a steering entry contains only promisc QPs, it can be removed. */ 325 static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, 326 enum mlx4_steer_type steer, 327 unsigned int index, u32 tqpn) 328 { 329 struct mlx4_steer *s_steer; 330 struct mlx4_cmd_mailbox *mailbox; 331 struct mlx4_mgm *mgm; 332 struct mlx4_steer_index *entry = NULL, *tmp_entry; 333 u32 qpn; 334 u32 members_count; 335 bool ret = false; 336 int i; 337 338 s_steer = &mlx4_priv(dev)->steer[port - 1]; 339 340 mailbox = mlx4_alloc_cmd_mailbox(dev); 341 if (IS_ERR(mailbox)) 342 return false; 343 mgm = mailbox->buf; 344 345 if (mlx4_READ_ENTRY(dev, index, mailbox)) 346 goto out; 347 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 348 for (i = 0; i < members_count; i++) { 349 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 350 if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { 351 /* the qp is not promisc, the entry can't be removed */ 352 goto out; 353 } 354 } 355 /* All the qps currently registered for this entry are promiscuous, 356 * Checking for duplicates */ 357 ret = true; 358 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 359 if (entry->index == index) { 360 if (list_empty(&entry->duplicates)) { 361 list_del(&entry->list); 362 kfree(entry); 363 } else { 364 /* This entry contains duplicates so it shouldn't be removed */ 365 ret = false; 366 goto out; 367 } 368 } 369 } 370 371 out: 372 mlx4_free_cmd_mailbox(dev, mailbox); 373 return ret; 374 } 375 376 static int add_promisc_qp(struct mlx4_dev *dev, u8 port, 377 enum mlx4_steer_type steer, u32 qpn) 378 { 379 struct mlx4_steer *s_steer; 380 struct mlx4_cmd_mailbox *mailbox; 381 struct mlx4_mgm *mgm; 382 struct mlx4_steer_index *entry; 383 struct mlx4_promisc_qp *pqp; 384 struct mlx4_promisc_qp *dqp; 385 u32 members_count; 386 u32 prot; 387 int i; 388 bool found; 389 int err; 390 struct mlx4_priv *priv = mlx4_priv(dev); 391 392 s_steer = &mlx4_priv(dev)->steer[port - 1]; 393 394 mutex_lock(&priv->mcg_table.mutex); 395 396 if (get_promisc_qp(dev, port, steer, qpn)) { 397 err = 0; /* Noting to do, already exists */ 398 goto out_mutex; 399 } 400 401 pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 402 if (!pqp) { 403 err = -ENOMEM; 404 goto out_mutex; 405 } 406 pqp->qpn = qpn; 407 408 mailbox = mlx4_alloc_cmd_mailbox(dev); 409 if (IS_ERR(mailbox)) { 410 err = -ENOMEM; 411 goto out_alloc; 412 } 413 mgm = mailbox->buf; 414 415 /* the promisc qp needs to be added for each one of the steering 416 * entries, if it already exists, needs to be added as a duplicate 417 * for this entry */ 418 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 419 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 420 if (err) 421 goto out_mailbox; 422 423 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 424 prot = be32_to_cpu(mgm->members_count) >> 30; 425 found = false; 426 for (i = 0; i < members_count; i++) { 427 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 428 /* Entry already exists, add to duplicates */ 429 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 430 if (!dqp) { 431 err = -ENOMEM; 432 goto out_mailbox; 433 } 434 dqp->qpn = qpn; 435 list_add_tail(&dqp->list, &entry->duplicates); 436 found = true; 437 } 438 } 439 if (!found) { 440 /* Need to add the qpn to mgm */ 441 if (members_count == dev->caps.num_qp_per_mgm) { 442 /* entry is full */ 443 err = -ENOMEM; 444 goto out_mailbox; 445 } 446 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); 447 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 448 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 449 if (err) 450 goto out_mailbox; 451 } 452 } 453 454 /* add the new qpn to list of promisc qps */ 455 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 456 /* now need to add all the promisc qps to default entry */ 457 memset(mgm, 0, sizeof *mgm); 458 members_count = 0; 459 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 460 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 461 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 462 463 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 464 if (err) 465 goto out_list; 466 467 mlx4_free_cmd_mailbox(dev, mailbox); 468 mutex_unlock(&priv->mcg_table.mutex); 469 return 0; 470 471 out_list: 472 list_del(&pqp->list); 473 out_mailbox: 474 mlx4_free_cmd_mailbox(dev, mailbox); 475 out_alloc: 476 kfree(pqp); 477 out_mutex: 478 mutex_unlock(&priv->mcg_table.mutex); 479 return err; 480 } 481 482 static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, 483 enum mlx4_steer_type steer, u32 qpn) 484 { 485 struct mlx4_priv *priv = mlx4_priv(dev); 486 struct mlx4_steer *s_steer; 487 struct mlx4_cmd_mailbox *mailbox; 488 struct mlx4_mgm *mgm; 489 struct mlx4_steer_index *entry; 490 struct mlx4_promisc_qp *pqp; 491 struct mlx4_promisc_qp *dqp; 492 u32 members_count; 493 bool found; 494 bool back_to_list = false; 495 int loc, i; 496 int err; 497 498 s_steer = &mlx4_priv(dev)->steer[port - 1]; 499 mutex_lock(&priv->mcg_table.mutex); 500 501 pqp = get_promisc_qp(dev, port, steer, qpn); 502 if (unlikely(!pqp)) { 503 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 504 /* nothing to do */ 505 err = 0; 506 goto out_mutex; 507 } 508 509 /*remove from list of promisc qps */ 510 list_del(&pqp->list); 511 512 /* set the default entry not to include the removed one */ 513 mailbox = mlx4_alloc_cmd_mailbox(dev); 514 if (IS_ERR(mailbox)) { 515 err = -ENOMEM; 516 back_to_list = true; 517 goto out_list; 518 } 519 mgm = mailbox->buf; 520 memset(mgm, 0, sizeof *mgm); 521 members_count = 0; 522 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 523 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 524 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 525 526 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 527 if (err) 528 goto out_mailbox; 529 530 /* remove the qp from all the steering entries*/ 531 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 532 found = false; 533 list_for_each_entry(dqp, &entry->duplicates, list) { 534 if (dqp->qpn == qpn) { 535 found = true; 536 break; 537 } 538 } 539 if (found) { 540 /* a duplicate, no need to change the mgm, 541 * only update the duplicates list */ 542 list_del(&dqp->list); 543 kfree(dqp); 544 } else { 545 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 546 if (err) 547 goto out_mailbox; 548 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 549 for (loc = -1, i = 0; i < members_count; ++i) 550 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) 551 loc = i; 552 553 mgm->members_count = cpu_to_be32(--members_count | 554 (MLX4_PROT_ETH << 30)); 555 mgm->qp[loc] = mgm->qp[i - 1]; 556 mgm->qp[i - 1] = 0; 557 558 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 559 if (err) 560 goto out_mailbox; 561 } 562 563 } 564 565 out_mailbox: 566 mlx4_free_cmd_mailbox(dev, mailbox); 567 out_list: 568 if (back_to_list) 569 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 570 else 571 kfree(pqp); 572 out_mutex: 573 mutex_unlock(&priv->mcg_table.mutex); 574 return err; 575 } 576 577 /* 578 * Caller must hold MCG table semaphore. gid and mgm parameters must 579 * be properly aligned for command interface. 580 * 581 * Returns 0 unless a firmware command error occurs. 582 * 583 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 584 * and *mgm holds MGM entry. 585 * 586 * if GID is found in AMGM, *index = index in AMGM, *prev = index of 587 * previous entry in hash chain and *mgm holds AMGM entry. 588 * 589 * If no AMGM exists for given gid, *index = -1, *prev = index of last 590 * entry in hash chain and *mgm holds end of hash chain. 591 */ 592 static int find_entry(struct mlx4_dev *dev, u8 port, 593 u8 *gid, enum mlx4_protocol prot, 594 struct mlx4_cmd_mailbox *mgm_mailbox, 595 int *prev, int *index) 596 { 597 struct mlx4_cmd_mailbox *mailbox; 598 struct mlx4_mgm *mgm = mgm_mailbox->buf; 599 u8 *mgid; 600 int err; 601 u16 hash; 602 u8 op_mod = (prot == MLX4_PROT_ETH) ? 603 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; 604 605 mailbox = mlx4_alloc_cmd_mailbox(dev); 606 if (IS_ERR(mailbox)) 607 return -ENOMEM; 608 mgid = mailbox->buf; 609 610 memcpy(mgid, gid, 16); 611 612 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); 613 mlx4_free_cmd_mailbox(dev, mailbox); 614 if (err) 615 return err; 616 617 if (0) 618 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); 619 620 *index = hash; 621 *prev = -1; 622 623 do { 624 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 625 if (err) 626 return err; 627 628 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 629 if (*index != hash) { 630 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 631 err = -EINVAL; 632 } 633 return err; 634 } 635 636 if (!memcmp(mgm->gid, gid, 16) && 637 be32_to_cpu(mgm->members_count) >> 30 == prot) 638 return err; 639 640 *prev = *index; 641 *index = be32_to_cpu(mgm->next_gid_index) >> 6; 642 } while (*index); 643 644 *index = -1; 645 return err; 646 } 647 648 static const u8 __promisc_mode[] = { 649 [MLX4_FS_REGULAR] = 0x0, 650 [MLX4_FS_ALL_DEFAULT] = 0x1, 651 [MLX4_FS_MC_DEFAULT] = 0x3, 652 [MLX4_FS_UC_SNIFFER] = 0x4, 653 [MLX4_FS_MC_SNIFFER] = 0x5, 654 }; 655 656 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, 657 enum mlx4_net_trans_promisc_mode flow_type) 658 { 659 if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { 660 mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); 661 return -EINVAL; 662 } 663 return __promisc_mode[flow_type]; 664 } 665 EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); 666 667 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, 668 struct mlx4_net_trans_rule_hw_ctrl *hw) 669 { 670 u8 flags = 0; 671 672 flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; 673 flags |= ctrl->exclusive ? (1 << 2) : 0; 674 flags |= ctrl->allow_loopback ? (1 << 3) : 0; 675 676 hw->flags = flags; 677 hw->type = __promisc_mode[ctrl->promisc_mode]; 678 hw->prio = cpu_to_be16(ctrl->priority); 679 hw->port = ctrl->port; 680 hw->qpn = cpu_to_be32(ctrl->qpn); 681 } 682 683 const u16 __sw_id_hw[] = { 684 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, 685 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, 686 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, 687 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, 688 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, 689 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 690 }; 691 692 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, 693 enum mlx4_net_trans_rule_id id) 694 { 695 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 696 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 697 return -EINVAL; 698 } 699 return __sw_id_hw[id]; 700 } 701 EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); 702 703 static const int __rule_hw_sz[] = { 704 [MLX4_NET_TRANS_RULE_ID_ETH] = 705 sizeof(struct mlx4_net_trans_rule_hw_eth), 706 [MLX4_NET_TRANS_RULE_ID_IB] = 707 sizeof(struct mlx4_net_trans_rule_hw_ib), 708 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, 709 [MLX4_NET_TRANS_RULE_ID_IPV4] = 710 sizeof(struct mlx4_net_trans_rule_hw_ipv4), 711 [MLX4_NET_TRANS_RULE_ID_TCP] = 712 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 713 [MLX4_NET_TRANS_RULE_ID_UDP] = 714 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) 715 }; 716 717 int mlx4_hw_rule_sz(struct mlx4_dev *dev, 718 enum mlx4_net_trans_rule_id id) 719 { 720 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 721 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 722 return -EINVAL; 723 } 724 725 return __rule_hw_sz[id]; 726 } 727 EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); 728 729 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, 730 struct _rule_hw *rule_hw) 731 { 732 if (mlx4_hw_rule_sz(dev, spec->id) < 0) 733 return -EINVAL; 734 memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); 735 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); 736 rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; 737 738 switch (spec->id) { 739 case MLX4_NET_TRANS_RULE_ID_ETH: 740 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); 741 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, 742 ETH_ALEN); 743 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); 744 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, 745 ETH_ALEN); 746 if (spec->eth.ether_type_enable) { 747 rule_hw->eth.ether_type_enable = 1; 748 rule_hw->eth.ether_type = spec->eth.ether_type; 749 } 750 rule_hw->eth.vlan_tag = spec->eth.vlan_id; 751 rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; 752 break; 753 754 case MLX4_NET_TRANS_RULE_ID_IB: 755 rule_hw->ib.l3_qpn = spec->ib.l3_qpn; 756 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; 757 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); 758 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); 759 break; 760 761 case MLX4_NET_TRANS_RULE_ID_IPV6: 762 return -EOPNOTSUPP; 763 764 case MLX4_NET_TRANS_RULE_ID_IPV4: 765 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; 766 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; 767 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; 768 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; 769 break; 770 771 case MLX4_NET_TRANS_RULE_ID_TCP: 772 case MLX4_NET_TRANS_RULE_ID_UDP: 773 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; 774 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; 775 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; 776 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; 777 break; 778 779 default: 780 return -EINVAL; 781 } 782 783 return __rule_hw_sz[spec->id]; 784 } 785 786 static void mlx4_err_rule(struct mlx4_dev *dev, char *str, 787 struct mlx4_net_trans_rule *rule) 788 { 789 #define BUF_SIZE 256 790 struct mlx4_spec_list *cur; 791 char buf[BUF_SIZE]; 792 int len = 0; 793 794 mlx4_err(dev, "%s", str); 795 len += snprintf(buf + len, BUF_SIZE - len, 796 "port = %d prio = 0x%x qp = 0x%x ", 797 rule->port, rule->priority, rule->qpn); 798 799 list_for_each_entry(cur, &rule->list, list) { 800 switch (cur->id) { 801 case MLX4_NET_TRANS_RULE_ID_ETH: 802 len += snprintf(buf + len, BUF_SIZE - len, 803 "dmac = %pM ", &cur->eth.dst_mac); 804 if (cur->eth.ether_type) 805 len += snprintf(buf + len, BUF_SIZE - len, 806 "ethertype = 0x%x ", 807 be16_to_cpu(cur->eth.ether_type)); 808 if (cur->eth.vlan_id) 809 len += snprintf(buf + len, BUF_SIZE - len, 810 "vlan-id = %d ", 811 be16_to_cpu(cur->eth.vlan_id)); 812 break; 813 814 case MLX4_NET_TRANS_RULE_ID_IPV4: 815 if (cur->ipv4.src_ip) 816 len += snprintf(buf + len, BUF_SIZE - len, 817 "src-ip = %pI4 ", 818 &cur->ipv4.src_ip); 819 if (cur->ipv4.dst_ip) 820 len += snprintf(buf + len, BUF_SIZE - len, 821 "dst-ip = %pI4 ", 822 &cur->ipv4.dst_ip); 823 break; 824 825 case MLX4_NET_TRANS_RULE_ID_TCP: 826 case MLX4_NET_TRANS_RULE_ID_UDP: 827 if (cur->tcp_udp.src_port) 828 len += snprintf(buf + len, BUF_SIZE - len, 829 "src-port = %d ", 830 be16_to_cpu(cur->tcp_udp.src_port)); 831 if (cur->tcp_udp.dst_port) 832 len += snprintf(buf + len, BUF_SIZE - len, 833 "dst-port = %d ", 834 be16_to_cpu(cur->tcp_udp.dst_port)); 835 break; 836 837 case MLX4_NET_TRANS_RULE_ID_IB: 838 len += snprintf(buf + len, BUF_SIZE - len, 839 "dst-gid = %pI6\n", cur->ib.dst_gid); 840 len += snprintf(buf + len, BUF_SIZE - len, 841 "dst-gid-mask = %pI6\n", 842 cur->ib.dst_gid_msk); 843 break; 844 845 case MLX4_NET_TRANS_RULE_ID_IPV6: 846 break; 847 848 default: 849 break; 850 } 851 } 852 len += snprintf(buf + len, BUF_SIZE - len, "\n"); 853 mlx4_err(dev, "%s", buf); 854 855 if (len >= BUF_SIZE) 856 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 857 } 858 859 int mlx4_flow_attach(struct mlx4_dev *dev, 860 struct mlx4_net_trans_rule *rule, u64 *reg_id) 861 { 862 struct mlx4_cmd_mailbox *mailbox; 863 struct mlx4_spec_list *cur; 864 u32 size = 0; 865 int ret; 866 867 mailbox = mlx4_alloc_cmd_mailbox(dev); 868 if (IS_ERR(mailbox)) 869 return PTR_ERR(mailbox); 870 871 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); 872 trans_rule_ctrl_to_hw(rule, mailbox->buf); 873 874 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 875 876 list_for_each_entry(cur, &rule->list, list) { 877 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 878 if (ret < 0) { 879 mlx4_free_cmd_mailbox(dev, mailbox); 880 return -EINVAL; 881 } 882 size += ret; 883 } 884 885 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 886 if (ret == -ENOMEM) 887 mlx4_err_rule(dev, 888 "mcg table is full. Fail to register network rule.\n", 889 rule); 890 else if (ret) 891 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 892 893 mlx4_free_cmd_mailbox(dev, mailbox); 894 895 return ret; 896 } 897 EXPORT_SYMBOL_GPL(mlx4_flow_attach); 898 899 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) 900 { 901 int err; 902 903 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); 904 if (err) 905 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", 906 reg_id); 907 return err; 908 } 909 EXPORT_SYMBOL_GPL(mlx4_flow_detach); 910 911 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 912 int block_mcast_loopback, enum mlx4_protocol prot, 913 enum mlx4_steer_type steer) 914 { 915 struct mlx4_priv *priv = mlx4_priv(dev); 916 struct mlx4_cmd_mailbox *mailbox; 917 struct mlx4_mgm *mgm; 918 u32 members_count; 919 int index, prev; 920 int link = 0; 921 int i; 922 int err; 923 u8 port = gid[5]; 924 u8 new_entry = 0; 925 926 mailbox = mlx4_alloc_cmd_mailbox(dev); 927 if (IS_ERR(mailbox)) 928 return PTR_ERR(mailbox); 929 mgm = mailbox->buf; 930 931 mutex_lock(&priv->mcg_table.mutex); 932 err = find_entry(dev, port, gid, prot, 933 mailbox, &prev, &index); 934 if (err) 935 goto out; 936 937 if (index != -1) { 938 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 939 new_entry = 1; 940 memcpy(mgm->gid, gid, 16); 941 } 942 } else { 943 link = 1; 944 945 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); 946 if (index == -1) { 947 mlx4_err(dev, "No AMGM entries left\n"); 948 err = -ENOMEM; 949 goto out; 950 } 951 index += dev->caps.num_mgms; 952 953 new_entry = 1; 954 memset(mgm, 0, sizeof *mgm); 955 memcpy(mgm->gid, gid, 16); 956 } 957 958 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 959 if (members_count == dev->caps.num_qp_per_mgm) { 960 mlx4_err(dev, "MGM at index %x is full.\n", index); 961 err = -ENOMEM; 962 goto out; 963 } 964 965 for (i = 0; i < members_count; ++i) 966 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 967 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); 968 err = 0; 969 goto out; 970 } 971 972 if (block_mcast_loopback) 973 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | 974 (1U << MGM_BLCK_LB_BIT)); 975 else 976 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 977 978 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 979 980 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 981 if (err) 982 goto out; 983 984 if (!link) 985 goto out; 986 987 err = mlx4_READ_ENTRY(dev, prev, mailbox); 988 if (err) 989 goto out; 990 991 mgm->next_gid_index = cpu_to_be32(index << 6); 992 993 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 994 if (err) 995 goto out; 996 997 out: 998 if (prot == MLX4_PROT_ETH) { 999 /* manage the steering entry for promisc mode */ 1000 if (new_entry) 1001 new_steering_entry(dev, port, steer, index, qp->qpn); 1002 else 1003 existing_steering_entry(dev, port, steer, 1004 index, qp->qpn); 1005 } 1006 if (err && link && index != -1) { 1007 if (index < dev->caps.num_mgms) 1008 mlx4_warn(dev, "Got AMGM index %d < %d", 1009 index, dev->caps.num_mgms); 1010 else 1011 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1012 index - dev->caps.num_mgms); 1013 } 1014 mutex_unlock(&priv->mcg_table.mutex); 1015 1016 mlx4_free_cmd_mailbox(dev, mailbox); 1017 return err; 1018 } 1019 1020 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1021 enum mlx4_protocol prot, enum mlx4_steer_type steer) 1022 { 1023 struct mlx4_priv *priv = mlx4_priv(dev); 1024 struct mlx4_cmd_mailbox *mailbox; 1025 struct mlx4_mgm *mgm; 1026 u32 members_count; 1027 int prev, index; 1028 int i, loc; 1029 int err; 1030 u8 port = gid[5]; 1031 bool removed_entry = false; 1032 1033 mailbox = mlx4_alloc_cmd_mailbox(dev); 1034 if (IS_ERR(mailbox)) 1035 return PTR_ERR(mailbox); 1036 mgm = mailbox->buf; 1037 1038 mutex_lock(&priv->mcg_table.mutex); 1039 1040 err = find_entry(dev, port, gid, prot, 1041 mailbox, &prev, &index); 1042 if (err) 1043 goto out; 1044 1045 if (index == -1) { 1046 mlx4_err(dev, "MGID %pI6 not found\n", gid); 1047 err = -EINVAL; 1048 goto out; 1049 } 1050 1051 /* if this pq is also a promisc qp, it shouldn't be removed */ 1052 if (prot == MLX4_PROT_ETH && 1053 check_duplicate_entry(dev, port, steer, index, qp->qpn)) 1054 goto out; 1055 1056 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1057 for (loc = -1, i = 0; i < members_count; ++i) 1058 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 1059 loc = i; 1060 1061 if (loc == -1) { 1062 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); 1063 err = -EINVAL; 1064 goto out; 1065 } 1066 1067 1068 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 1069 mgm->qp[loc] = mgm->qp[i - 1]; 1070 mgm->qp[i - 1] = 0; 1071 1072 if (prot == MLX4_PROT_ETH) 1073 removed_entry = can_remove_steering_entry(dev, port, steer, 1074 index, qp->qpn); 1075 if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { 1076 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1077 goto out; 1078 } 1079 1080 /* We are going to delete the entry, members count should be 0 */ 1081 mgm->members_count = cpu_to_be32((u32) prot << 30); 1082 1083 if (prev == -1) { 1084 /* Remove entry from MGM */ 1085 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1086 if (amgm_index) { 1087 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 1088 if (err) 1089 goto out; 1090 } else 1091 memset(mgm->gid, 0, 16); 1092 1093 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1094 if (err) 1095 goto out; 1096 1097 if (amgm_index) { 1098 if (amgm_index < dev->caps.num_mgms) 1099 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1100 index, amgm_index, dev->caps.num_mgms); 1101 else 1102 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1103 amgm_index - dev->caps.num_mgms); 1104 } 1105 } else { 1106 /* Remove entry from AMGM */ 1107 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1108 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1109 if (err) 1110 goto out; 1111 1112 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 1113 1114 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1115 if (err) 1116 goto out; 1117 1118 if (index < dev->caps.num_mgms) 1119 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1120 prev, index, dev->caps.num_mgms); 1121 else 1122 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1123 index - dev->caps.num_mgms); 1124 } 1125 1126 out: 1127 mutex_unlock(&priv->mcg_table.mutex); 1128 1129 mlx4_free_cmd_mailbox(dev, mailbox); 1130 return err; 1131 } 1132 1133 static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, 1134 u8 gid[16], u8 attach, u8 block_loopback, 1135 enum mlx4_protocol prot) 1136 { 1137 struct mlx4_cmd_mailbox *mailbox; 1138 int err = 0; 1139 int qpn; 1140 1141 if (!mlx4_is_mfunc(dev)) 1142 return -EBADF; 1143 1144 mailbox = mlx4_alloc_cmd_mailbox(dev); 1145 if (IS_ERR(mailbox)) 1146 return PTR_ERR(mailbox); 1147 1148 memcpy(mailbox->buf, gid, 16); 1149 qpn = qp->qpn; 1150 qpn |= (prot << 28); 1151 if (attach && block_loopback) 1152 qpn |= (1 << 31); 1153 1154 err = mlx4_cmd(dev, mailbox->dma, qpn, attach, 1155 MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, 1156 MLX4_CMD_WRAPPED); 1157 1158 mlx4_free_cmd_mailbox(dev, mailbox); 1159 return err; 1160 } 1161 1162 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1163 u8 gid[16], u8 port, 1164 int block_mcast_loopback, 1165 enum mlx4_protocol prot, u64 *reg_id) 1166 { 1167 struct mlx4_spec_list spec = { {NULL} }; 1168 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1169 1170 struct mlx4_net_trans_rule rule = { 1171 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1172 .exclusive = 0, 1173 .promisc_mode = MLX4_FS_REGULAR, 1174 .priority = MLX4_DOMAIN_NIC, 1175 }; 1176 1177 rule.allow_loopback = !block_mcast_loopback; 1178 rule.port = port; 1179 rule.qpn = qp->qpn; 1180 INIT_LIST_HEAD(&rule.list); 1181 1182 switch (prot) { 1183 case MLX4_PROT_ETH: 1184 spec.id = MLX4_NET_TRANS_RULE_ID_ETH; 1185 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); 1186 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 1187 break; 1188 1189 case MLX4_PROT_IB_IPV6: 1190 spec.id = MLX4_NET_TRANS_RULE_ID_IB; 1191 memcpy(spec.ib.dst_gid, gid, 16); 1192 memset(&spec.ib.dst_gid_msk, 0xff, 16); 1193 break; 1194 default: 1195 return -EINVAL; 1196 } 1197 list_add_tail(&spec.list, &rule.list); 1198 1199 return mlx4_flow_attach(dev, &rule, reg_id); 1200 } 1201 1202 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1203 u8 port, int block_mcast_loopback, 1204 enum mlx4_protocol prot, u64 *reg_id) 1205 { 1206 switch (dev->caps.steering_mode) { 1207 case MLX4_STEERING_MODE_A0: 1208 if (prot == MLX4_PROT_ETH) 1209 return 0; 1210 1211 case MLX4_STEERING_MODE_B0: 1212 if (prot == MLX4_PROT_ETH) 1213 gid[7] |= (MLX4_MC_STEER << 1); 1214 1215 if (mlx4_is_mfunc(dev)) 1216 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1217 block_mcast_loopback, prot); 1218 return mlx4_qp_attach_common(dev, qp, gid, 1219 block_mcast_loopback, prot, 1220 MLX4_MC_STEER); 1221 1222 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1223 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 1224 block_mcast_loopback, 1225 prot, reg_id); 1226 default: 1227 return -EINVAL; 1228 } 1229 } 1230 EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1231 1232 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1233 enum mlx4_protocol prot, u64 reg_id) 1234 { 1235 switch (dev->caps.steering_mode) { 1236 case MLX4_STEERING_MODE_A0: 1237 if (prot == MLX4_PROT_ETH) 1238 return 0; 1239 1240 case MLX4_STEERING_MODE_B0: 1241 if (prot == MLX4_PROT_ETH) 1242 gid[7] |= (MLX4_MC_STEER << 1); 1243 1244 if (mlx4_is_mfunc(dev)) 1245 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1246 1247 return mlx4_qp_detach_common(dev, qp, gid, prot, 1248 MLX4_MC_STEER); 1249 1250 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1251 return mlx4_flow_detach(dev, reg_id); 1252 1253 default: 1254 return -EINVAL; 1255 } 1256 } 1257 EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1258 1259 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1260 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1261 { 1262 struct mlx4_net_trans_rule rule; 1263 u64 *regid_p; 1264 1265 switch (mode) { 1266 case MLX4_FS_ALL_DEFAULT: 1267 regid_p = &dev->regid_promisc_array[port]; 1268 break; 1269 case MLX4_FS_MC_DEFAULT: 1270 regid_p = &dev->regid_allmulti_array[port]; 1271 break; 1272 default: 1273 return -1; 1274 } 1275 1276 if (*regid_p != 0) 1277 return -1; 1278 1279 rule.promisc_mode = mode; 1280 rule.port = port; 1281 rule.qpn = qpn; 1282 INIT_LIST_HEAD(&rule.list); 1283 mlx4_err(dev, "going promisc on %x\n", port); 1284 1285 return mlx4_flow_attach(dev, &rule, regid_p); 1286 } 1287 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); 1288 1289 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, 1290 enum mlx4_net_trans_promisc_mode mode) 1291 { 1292 int ret; 1293 u64 *regid_p; 1294 1295 switch (mode) { 1296 case MLX4_FS_ALL_DEFAULT: 1297 regid_p = &dev->regid_promisc_array[port]; 1298 break; 1299 case MLX4_FS_MC_DEFAULT: 1300 regid_p = &dev->regid_allmulti_array[port]; 1301 break; 1302 default: 1303 return -1; 1304 } 1305 1306 if (*regid_p == 0) 1307 return -1; 1308 1309 ret = mlx4_flow_detach(dev, *regid_p); 1310 if (ret == 0) 1311 *regid_p = 0; 1312 1313 return ret; 1314 } 1315 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); 1316 1317 int mlx4_unicast_attach(struct mlx4_dev *dev, 1318 struct mlx4_qp *qp, u8 gid[16], 1319 int block_mcast_loopback, enum mlx4_protocol prot) 1320 { 1321 if (prot == MLX4_PROT_ETH) 1322 gid[7] |= (MLX4_UC_STEER << 1); 1323 1324 if (mlx4_is_mfunc(dev)) 1325 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1326 block_mcast_loopback, prot); 1327 1328 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1329 prot, MLX4_UC_STEER); 1330 } 1331 EXPORT_SYMBOL_GPL(mlx4_unicast_attach); 1332 1333 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1334 u8 gid[16], enum mlx4_protocol prot) 1335 { 1336 if (prot == MLX4_PROT_ETH) 1337 gid[7] |= (MLX4_UC_STEER << 1); 1338 1339 if (mlx4_is_mfunc(dev)) 1340 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1341 1342 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); 1343 } 1344 EXPORT_SYMBOL_GPL(mlx4_unicast_detach); 1345 1346 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1347 struct mlx4_vhcr *vhcr, 1348 struct mlx4_cmd_mailbox *inbox, 1349 struct mlx4_cmd_mailbox *outbox, 1350 struct mlx4_cmd_info *cmd) 1351 { 1352 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1353 u8 port = vhcr->in_param >> 62; 1354 enum mlx4_steer_type steer = vhcr->in_modifier; 1355 1356 /* Promiscuous unicast is not allowed in mfunc */ 1357 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1358 return 0; 1359 1360 if (vhcr->op_modifier) 1361 return add_promisc_qp(dev, port, steer, qpn); 1362 else 1363 return remove_promisc_qp(dev, port, steer, qpn); 1364 } 1365 1366 static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, 1367 enum mlx4_steer_type steer, u8 add, u8 port) 1368 { 1369 return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, 1370 MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, 1371 MLX4_CMD_WRAPPED); 1372 } 1373 1374 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1375 { 1376 if (mlx4_is_mfunc(dev)) 1377 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1378 1379 return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1380 } 1381 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); 1382 1383 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1384 { 1385 if (mlx4_is_mfunc(dev)) 1386 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1387 1388 return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1389 } 1390 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); 1391 1392 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1393 { 1394 if (mlx4_is_mfunc(dev)) 1395 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1396 1397 return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1398 } 1399 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); 1400 1401 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1402 { 1403 if (mlx4_is_mfunc(dev)) 1404 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1405 1406 return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1407 } 1408 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); 1409 1410 int mlx4_init_mcg_table(struct mlx4_dev *dev) 1411 { 1412 struct mlx4_priv *priv = mlx4_priv(dev); 1413 int err; 1414 1415 /* No need for mcg_table when fw managed the mcg table*/ 1416 if (dev->caps.steering_mode == 1417 MLX4_STEERING_MODE_DEVICE_MANAGED) 1418 return 0; 1419 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1420 dev->caps.num_amgms - 1, 0, 0); 1421 if (err) 1422 return err; 1423 1424 mutex_init(&priv->mcg_table.mutex); 1425 1426 return 0; 1427 } 1428 1429 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1430 { 1431 if (dev->caps.steering_mode != 1432 MLX4_STEERING_MODE_DEVICE_MANAGED) 1433 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1434 } 1435