1 /* 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/string.h> 35 #include <linux/etherdevice.h> 36 37 #include <linux/mlx4/cmd.h> 38 #include <linux/export.h> 39 40 #include "mlx4.h" 41 42 static const u8 zero_gid[16]; /* automatically initialized to 0 */ 43 44 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 45 { 46 return 1 << dev->oper_log_mgm_entry_size; 47 } 48 49 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 50 { 51 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 52 } 53 54 static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, 55 struct mlx4_cmd_mailbox *mailbox, 56 u32 size, 57 u64 *reg_id) 58 { 59 u64 imm; 60 int err = 0; 61 62 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, 63 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 64 MLX4_CMD_NATIVE); 65 if (err) 66 return err; 67 *reg_id = imm; 68 69 return err; 70 } 71 72 static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) 73 { 74 int err = 0; 75 76 err = mlx4_cmd(dev, regid, 0, 0, 77 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 78 MLX4_CMD_NATIVE); 79 80 return err; 81 } 82 83 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 84 struct mlx4_cmd_mailbox *mailbox) 85 { 86 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 87 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 88 } 89 90 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 91 struct mlx4_cmd_mailbox *mailbox) 92 { 93 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 94 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 95 } 96 97 static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, 98 struct mlx4_cmd_mailbox *mailbox) 99 { 100 u32 in_mod; 101 102 in_mod = (u32) port << 16 | steer << 1; 103 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 104 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, 105 MLX4_CMD_NATIVE); 106 } 107 108 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 109 u16 *hash, u8 op_mod) 110 { 111 u64 imm; 112 int err; 113 114 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 115 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, 116 MLX4_CMD_NATIVE); 117 118 if (!err) 119 *hash = imm; 120 121 return err; 122 } 123 124 static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, 125 enum mlx4_steer_type steer, 126 u32 qpn) 127 { 128 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1]; 129 struct mlx4_promisc_qp *pqp; 130 131 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 132 if (pqp->qpn == qpn) 133 return pqp; 134 } 135 /* not found */ 136 return NULL; 137 } 138 139 /* 140 * Add new entry to steering data structure. 141 * All promisc QPs should be added as well 142 */ 143 static int new_steering_entry(struct mlx4_dev *dev, u8 port, 144 enum mlx4_steer_type steer, 145 unsigned int index, u32 qpn) 146 { 147 struct mlx4_steer *s_steer; 148 struct mlx4_cmd_mailbox *mailbox; 149 struct mlx4_mgm *mgm; 150 u32 members_count; 151 struct mlx4_steer_index *new_entry; 152 struct mlx4_promisc_qp *pqp; 153 struct mlx4_promisc_qp *dqp = NULL; 154 u32 prot; 155 int err; 156 157 s_steer = &mlx4_priv(dev)->steer[port - 1]; 158 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 159 if (!new_entry) 160 return -ENOMEM; 161 162 INIT_LIST_HEAD(&new_entry->duplicates); 163 new_entry->index = index; 164 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 165 166 /* If the given qpn is also a promisc qp, 167 * it should be inserted to duplicates list 168 */ 169 pqp = get_promisc_qp(dev, port, steer, qpn); 170 if (pqp) { 171 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 172 if (!dqp) { 173 err = -ENOMEM; 174 goto out_alloc; 175 } 176 dqp->qpn = qpn; 177 list_add_tail(&dqp->list, &new_entry->duplicates); 178 } 179 180 /* if no promisc qps for this vep, we are done */ 181 if (list_empty(&s_steer->promisc_qps[steer])) 182 return 0; 183 184 /* now need to add all the promisc qps to the new 185 * steering entry, as they should also receive the packets 186 * destined to this address */ 187 mailbox = mlx4_alloc_cmd_mailbox(dev); 188 if (IS_ERR(mailbox)) { 189 err = -ENOMEM; 190 goto out_alloc; 191 } 192 mgm = mailbox->buf; 193 194 err = mlx4_READ_ENTRY(dev, index, mailbox); 195 if (err) 196 goto out_mailbox; 197 198 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 199 prot = be32_to_cpu(mgm->members_count) >> 30; 200 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 201 /* don't add already existing qpn */ 202 if (pqp->qpn == qpn) 203 continue; 204 if (members_count == dev->caps.num_qp_per_mgm) { 205 /* out of space */ 206 err = -ENOMEM; 207 goto out_mailbox; 208 } 209 210 /* add the qpn */ 211 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 212 } 213 /* update the qps count and update the entry with all the promisc qps*/ 214 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 215 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 216 217 out_mailbox: 218 mlx4_free_cmd_mailbox(dev, mailbox); 219 if (!err) 220 return 0; 221 out_alloc: 222 if (dqp) { 223 list_del(&dqp->list); 224 kfree(dqp); 225 } 226 list_del(&new_entry->list); 227 kfree(new_entry); 228 return err; 229 } 230 231 /* update the data structures with existing steering entry */ 232 static int existing_steering_entry(struct mlx4_dev *dev, u8 port, 233 enum mlx4_steer_type steer, 234 unsigned int index, u32 qpn) 235 { 236 struct mlx4_steer *s_steer; 237 struct mlx4_steer_index *tmp_entry, *entry = NULL; 238 struct mlx4_promisc_qp *pqp; 239 struct mlx4_promisc_qp *dqp; 240 241 s_steer = &mlx4_priv(dev)->steer[port - 1]; 242 243 pqp = get_promisc_qp(dev, port, steer, qpn); 244 if (!pqp) 245 return 0; /* nothing to do */ 246 247 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 248 if (tmp_entry->index == index) { 249 entry = tmp_entry; 250 break; 251 } 252 } 253 if (unlikely(!entry)) { 254 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 255 return -EINVAL; 256 } 257 258 /* the given qpn is listed as a promisc qpn 259 * we need to add it as a duplicate to this entry 260 * for future references */ 261 list_for_each_entry(dqp, &entry->duplicates, list) { 262 if (qpn == pqp->qpn) 263 return 0; /* qp is already duplicated */ 264 } 265 266 /* add the qp as a duplicate on this index */ 267 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 268 if (!dqp) 269 return -ENOMEM; 270 dqp->qpn = qpn; 271 list_add_tail(&dqp->list, &entry->duplicates); 272 273 return 0; 274 } 275 276 /* Check whether a qpn is a duplicate on steering entry 277 * If so, it should not be removed from mgm */ 278 static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, 279 enum mlx4_steer_type steer, 280 unsigned int index, u32 qpn) 281 { 282 struct mlx4_steer *s_steer; 283 struct mlx4_steer_index *tmp_entry, *entry = NULL; 284 struct mlx4_promisc_qp *dqp, *tmp_dqp; 285 286 s_steer = &mlx4_priv(dev)->steer[port - 1]; 287 288 /* if qp is not promisc, it cannot be duplicated */ 289 if (!get_promisc_qp(dev, port, steer, qpn)) 290 return false; 291 292 /* The qp is promisc qp so it is a duplicate on this index 293 * Find the index entry, and remove the duplicate */ 294 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 295 if (tmp_entry->index == index) { 296 entry = tmp_entry; 297 break; 298 } 299 } 300 if (unlikely(!entry)) { 301 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 302 return false; 303 } 304 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 305 if (dqp->qpn == qpn) { 306 list_del(&dqp->list); 307 kfree(dqp); 308 } 309 } 310 return true; 311 } 312 313 /* I a steering entry contains only promisc QPs, it can be removed. */ 314 static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, 315 enum mlx4_steer_type steer, 316 unsigned int index, u32 tqpn) 317 { 318 struct mlx4_steer *s_steer; 319 struct mlx4_cmd_mailbox *mailbox; 320 struct mlx4_mgm *mgm; 321 struct mlx4_steer_index *entry = NULL, *tmp_entry; 322 u32 qpn; 323 u32 members_count; 324 bool ret = false; 325 int i; 326 327 s_steer = &mlx4_priv(dev)->steer[port - 1]; 328 329 mailbox = mlx4_alloc_cmd_mailbox(dev); 330 if (IS_ERR(mailbox)) 331 return false; 332 mgm = mailbox->buf; 333 334 if (mlx4_READ_ENTRY(dev, index, mailbox)) 335 goto out; 336 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 337 for (i = 0; i < members_count; i++) { 338 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 339 if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { 340 /* the qp is not promisc, the entry can't be removed */ 341 goto out; 342 } 343 } 344 /* All the qps currently registered for this entry are promiscuous, 345 * Checking for duplicates */ 346 ret = true; 347 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 348 if (entry->index == index) { 349 if (list_empty(&entry->duplicates)) { 350 list_del(&entry->list); 351 kfree(entry); 352 } else { 353 /* This entry contains duplicates so it shouldn't be removed */ 354 ret = false; 355 goto out; 356 } 357 } 358 } 359 360 out: 361 mlx4_free_cmd_mailbox(dev, mailbox); 362 return ret; 363 } 364 365 static int add_promisc_qp(struct mlx4_dev *dev, u8 port, 366 enum mlx4_steer_type steer, u32 qpn) 367 { 368 struct mlx4_steer *s_steer; 369 struct mlx4_cmd_mailbox *mailbox; 370 struct mlx4_mgm *mgm; 371 struct mlx4_steer_index *entry; 372 struct mlx4_promisc_qp *pqp; 373 struct mlx4_promisc_qp *dqp; 374 u32 members_count; 375 u32 prot; 376 int i; 377 bool found; 378 int err; 379 struct mlx4_priv *priv = mlx4_priv(dev); 380 381 s_steer = &mlx4_priv(dev)->steer[port - 1]; 382 383 mutex_lock(&priv->mcg_table.mutex); 384 385 if (get_promisc_qp(dev, port, steer, qpn)) { 386 err = 0; /* Noting to do, already exists */ 387 goto out_mutex; 388 } 389 390 pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 391 if (!pqp) { 392 err = -ENOMEM; 393 goto out_mutex; 394 } 395 pqp->qpn = qpn; 396 397 mailbox = mlx4_alloc_cmd_mailbox(dev); 398 if (IS_ERR(mailbox)) { 399 err = -ENOMEM; 400 goto out_alloc; 401 } 402 mgm = mailbox->buf; 403 404 /* the promisc qp needs to be added for each one of the steering 405 * entries, if it already exists, needs to be added as a duplicate 406 * for this entry */ 407 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 408 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 409 if (err) 410 goto out_mailbox; 411 412 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 413 prot = be32_to_cpu(mgm->members_count) >> 30; 414 found = false; 415 for (i = 0; i < members_count; i++) { 416 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 417 /* Entry already exists, add to duplicates */ 418 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 419 if (!dqp) { 420 err = -ENOMEM; 421 goto out_mailbox; 422 } 423 dqp->qpn = qpn; 424 list_add_tail(&dqp->list, &entry->duplicates); 425 found = true; 426 } 427 } 428 if (!found) { 429 /* Need to add the qpn to mgm */ 430 if (members_count == dev->caps.num_qp_per_mgm) { 431 /* entry is full */ 432 err = -ENOMEM; 433 goto out_mailbox; 434 } 435 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); 436 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 437 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 438 if (err) 439 goto out_mailbox; 440 } 441 } 442 443 /* add the new qpn to list of promisc qps */ 444 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 445 /* now need to add all the promisc qps to default entry */ 446 memset(mgm, 0, sizeof *mgm); 447 members_count = 0; 448 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 449 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 450 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 451 452 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 453 if (err) 454 goto out_list; 455 456 mlx4_free_cmd_mailbox(dev, mailbox); 457 mutex_unlock(&priv->mcg_table.mutex); 458 return 0; 459 460 out_list: 461 list_del(&pqp->list); 462 out_mailbox: 463 mlx4_free_cmd_mailbox(dev, mailbox); 464 out_alloc: 465 kfree(pqp); 466 out_mutex: 467 mutex_unlock(&priv->mcg_table.mutex); 468 return err; 469 } 470 471 static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, 472 enum mlx4_steer_type steer, u32 qpn) 473 { 474 struct mlx4_priv *priv = mlx4_priv(dev); 475 struct mlx4_steer *s_steer; 476 struct mlx4_cmd_mailbox *mailbox; 477 struct mlx4_mgm *mgm; 478 struct mlx4_steer_index *entry; 479 struct mlx4_promisc_qp *pqp; 480 struct mlx4_promisc_qp *dqp; 481 u32 members_count; 482 bool found; 483 bool back_to_list = false; 484 int loc, i; 485 int err; 486 487 s_steer = &mlx4_priv(dev)->steer[port - 1]; 488 mutex_lock(&priv->mcg_table.mutex); 489 490 pqp = get_promisc_qp(dev, port, steer, qpn); 491 if (unlikely(!pqp)) { 492 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 493 /* nothing to do */ 494 err = 0; 495 goto out_mutex; 496 } 497 498 /*remove from list of promisc qps */ 499 list_del(&pqp->list); 500 501 /* set the default entry not to include the removed one */ 502 mailbox = mlx4_alloc_cmd_mailbox(dev); 503 if (IS_ERR(mailbox)) { 504 err = -ENOMEM; 505 back_to_list = true; 506 goto out_list; 507 } 508 mgm = mailbox->buf; 509 memset(mgm, 0, sizeof *mgm); 510 members_count = 0; 511 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 512 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 513 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 514 515 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 516 if (err) 517 goto out_mailbox; 518 519 /* remove the qp from all the steering entries*/ 520 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 521 found = false; 522 list_for_each_entry(dqp, &entry->duplicates, list) { 523 if (dqp->qpn == qpn) { 524 found = true; 525 break; 526 } 527 } 528 if (found) { 529 /* a duplicate, no need to change the mgm, 530 * only update the duplicates list */ 531 list_del(&dqp->list); 532 kfree(dqp); 533 } else { 534 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 535 if (err) 536 goto out_mailbox; 537 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 538 for (loc = -1, i = 0; i < members_count; ++i) 539 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) 540 loc = i; 541 542 mgm->members_count = cpu_to_be32(--members_count | 543 (MLX4_PROT_ETH << 30)); 544 mgm->qp[loc] = mgm->qp[i - 1]; 545 mgm->qp[i - 1] = 0; 546 547 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 548 if (err) 549 goto out_mailbox; 550 } 551 552 } 553 554 out_mailbox: 555 mlx4_free_cmd_mailbox(dev, mailbox); 556 out_list: 557 if (back_to_list) 558 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 559 else 560 kfree(pqp); 561 out_mutex: 562 mutex_unlock(&priv->mcg_table.mutex); 563 return err; 564 } 565 566 /* 567 * Caller must hold MCG table semaphore. gid and mgm parameters must 568 * be properly aligned for command interface. 569 * 570 * Returns 0 unless a firmware command error occurs. 571 * 572 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 573 * and *mgm holds MGM entry. 574 * 575 * if GID is found in AMGM, *index = index in AMGM, *prev = index of 576 * previous entry in hash chain and *mgm holds AMGM entry. 577 * 578 * If no AMGM exists for given gid, *index = -1, *prev = index of last 579 * entry in hash chain and *mgm holds end of hash chain. 580 */ 581 static int find_entry(struct mlx4_dev *dev, u8 port, 582 u8 *gid, enum mlx4_protocol prot, 583 struct mlx4_cmd_mailbox *mgm_mailbox, 584 int *prev, int *index) 585 { 586 struct mlx4_cmd_mailbox *mailbox; 587 struct mlx4_mgm *mgm = mgm_mailbox->buf; 588 u8 *mgid; 589 int err; 590 u16 hash; 591 u8 op_mod = (prot == MLX4_PROT_ETH) ? 592 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; 593 594 mailbox = mlx4_alloc_cmd_mailbox(dev); 595 if (IS_ERR(mailbox)) 596 return -ENOMEM; 597 mgid = mailbox->buf; 598 599 memcpy(mgid, gid, 16); 600 601 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); 602 mlx4_free_cmd_mailbox(dev, mailbox); 603 if (err) 604 return err; 605 606 if (0) 607 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); 608 609 *index = hash; 610 *prev = -1; 611 612 do { 613 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 614 if (err) 615 return err; 616 617 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 618 if (*index != hash) { 619 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 620 err = -EINVAL; 621 } 622 return err; 623 } 624 625 if (!memcmp(mgm->gid, gid, 16) && 626 be32_to_cpu(mgm->members_count) >> 30 == prot) 627 return err; 628 629 *prev = *index; 630 *index = be32_to_cpu(mgm->next_gid_index) >> 6; 631 } while (*index); 632 633 *index = -1; 634 return err; 635 } 636 637 static const u8 __promisc_mode[] = { 638 [MLX4_FS_REGULAR] = 0x0, 639 [MLX4_FS_ALL_DEFAULT] = 0x1, 640 [MLX4_FS_MC_DEFAULT] = 0x3, 641 [MLX4_FS_UC_SNIFFER] = 0x4, 642 [MLX4_FS_MC_SNIFFER] = 0x5, 643 }; 644 645 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, 646 enum mlx4_net_trans_promisc_mode flow_type) 647 { 648 if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { 649 mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); 650 return -EINVAL; 651 } 652 return __promisc_mode[flow_type]; 653 } 654 EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); 655 656 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, 657 struct mlx4_net_trans_rule_hw_ctrl *hw) 658 { 659 u8 flags = 0; 660 661 flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; 662 flags |= ctrl->exclusive ? (1 << 2) : 0; 663 flags |= ctrl->allow_loopback ? (1 << 3) : 0; 664 665 hw->flags = flags; 666 hw->type = __promisc_mode[ctrl->promisc_mode]; 667 hw->prio = cpu_to_be16(ctrl->priority); 668 hw->port = ctrl->port; 669 hw->qpn = cpu_to_be32(ctrl->qpn); 670 } 671 672 const u16 __sw_id_hw[] = { 673 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, 674 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, 675 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, 676 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, 677 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, 678 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 679 }; 680 681 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, 682 enum mlx4_net_trans_rule_id id) 683 { 684 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 685 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 686 return -EINVAL; 687 } 688 return __sw_id_hw[id]; 689 } 690 EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); 691 692 static const int __rule_hw_sz[] = { 693 [MLX4_NET_TRANS_RULE_ID_ETH] = 694 sizeof(struct mlx4_net_trans_rule_hw_eth), 695 [MLX4_NET_TRANS_RULE_ID_IB] = 696 sizeof(struct mlx4_net_trans_rule_hw_ib), 697 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, 698 [MLX4_NET_TRANS_RULE_ID_IPV4] = 699 sizeof(struct mlx4_net_trans_rule_hw_ipv4), 700 [MLX4_NET_TRANS_RULE_ID_TCP] = 701 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 702 [MLX4_NET_TRANS_RULE_ID_UDP] = 703 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) 704 }; 705 706 int mlx4_hw_rule_sz(struct mlx4_dev *dev, 707 enum mlx4_net_trans_rule_id id) 708 { 709 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 710 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 711 return -EINVAL; 712 } 713 714 return __rule_hw_sz[id]; 715 } 716 EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); 717 718 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, 719 struct _rule_hw *rule_hw) 720 { 721 if (mlx4_hw_rule_sz(dev, spec->id) < 0) 722 return -EINVAL; 723 memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); 724 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); 725 rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; 726 727 switch (spec->id) { 728 case MLX4_NET_TRANS_RULE_ID_ETH: 729 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); 730 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, 731 ETH_ALEN); 732 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); 733 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, 734 ETH_ALEN); 735 if (spec->eth.ether_type_enable) { 736 rule_hw->eth.ether_type_enable = 1; 737 rule_hw->eth.ether_type = spec->eth.ether_type; 738 } 739 rule_hw->eth.vlan_tag = spec->eth.vlan_id; 740 rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; 741 break; 742 743 case MLX4_NET_TRANS_RULE_ID_IB: 744 rule_hw->ib.l3_qpn = spec->ib.l3_qpn; 745 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; 746 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); 747 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); 748 break; 749 750 case MLX4_NET_TRANS_RULE_ID_IPV6: 751 return -EOPNOTSUPP; 752 753 case MLX4_NET_TRANS_RULE_ID_IPV4: 754 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; 755 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; 756 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; 757 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; 758 break; 759 760 case MLX4_NET_TRANS_RULE_ID_TCP: 761 case MLX4_NET_TRANS_RULE_ID_UDP: 762 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; 763 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; 764 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; 765 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; 766 break; 767 768 default: 769 return -EINVAL; 770 } 771 772 return __rule_hw_sz[spec->id]; 773 } 774 775 static void mlx4_err_rule(struct mlx4_dev *dev, char *str, 776 struct mlx4_net_trans_rule *rule) 777 { 778 #define BUF_SIZE 256 779 struct mlx4_spec_list *cur; 780 char buf[BUF_SIZE]; 781 int len = 0; 782 783 mlx4_err(dev, "%s", str); 784 len += snprintf(buf + len, BUF_SIZE - len, 785 "port = %d prio = 0x%x qp = 0x%x ", 786 rule->port, rule->priority, rule->qpn); 787 788 list_for_each_entry(cur, &rule->list, list) { 789 switch (cur->id) { 790 case MLX4_NET_TRANS_RULE_ID_ETH: 791 len += snprintf(buf + len, BUF_SIZE - len, 792 "dmac = %pM ", &cur->eth.dst_mac); 793 if (cur->eth.ether_type) 794 len += snprintf(buf + len, BUF_SIZE - len, 795 "ethertype = 0x%x ", 796 be16_to_cpu(cur->eth.ether_type)); 797 if (cur->eth.vlan_id) 798 len += snprintf(buf + len, BUF_SIZE - len, 799 "vlan-id = %d ", 800 be16_to_cpu(cur->eth.vlan_id)); 801 break; 802 803 case MLX4_NET_TRANS_RULE_ID_IPV4: 804 if (cur->ipv4.src_ip) 805 len += snprintf(buf + len, BUF_SIZE - len, 806 "src-ip = %pI4 ", 807 &cur->ipv4.src_ip); 808 if (cur->ipv4.dst_ip) 809 len += snprintf(buf + len, BUF_SIZE - len, 810 "dst-ip = %pI4 ", 811 &cur->ipv4.dst_ip); 812 break; 813 814 case MLX4_NET_TRANS_RULE_ID_TCP: 815 case MLX4_NET_TRANS_RULE_ID_UDP: 816 if (cur->tcp_udp.src_port) 817 len += snprintf(buf + len, BUF_SIZE - len, 818 "src-port = %d ", 819 be16_to_cpu(cur->tcp_udp.src_port)); 820 if (cur->tcp_udp.dst_port) 821 len += snprintf(buf + len, BUF_SIZE - len, 822 "dst-port = %d ", 823 be16_to_cpu(cur->tcp_udp.dst_port)); 824 break; 825 826 case MLX4_NET_TRANS_RULE_ID_IB: 827 len += snprintf(buf + len, BUF_SIZE - len, 828 "dst-gid = %pI6\n", cur->ib.dst_gid); 829 len += snprintf(buf + len, BUF_SIZE - len, 830 "dst-gid-mask = %pI6\n", 831 cur->ib.dst_gid_msk); 832 break; 833 834 case MLX4_NET_TRANS_RULE_ID_IPV6: 835 break; 836 837 default: 838 break; 839 } 840 } 841 len += snprintf(buf + len, BUF_SIZE - len, "\n"); 842 mlx4_err(dev, "%s", buf); 843 844 if (len >= BUF_SIZE) 845 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 846 } 847 848 int mlx4_flow_attach(struct mlx4_dev *dev, 849 struct mlx4_net_trans_rule *rule, u64 *reg_id) 850 { 851 struct mlx4_cmd_mailbox *mailbox; 852 struct mlx4_spec_list *cur; 853 u32 size = 0; 854 int ret; 855 856 mailbox = mlx4_alloc_cmd_mailbox(dev); 857 if (IS_ERR(mailbox)) 858 return PTR_ERR(mailbox); 859 860 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); 861 trans_rule_ctrl_to_hw(rule, mailbox->buf); 862 863 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 864 865 list_for_each_entry(cur, &rule->list, list) { 866 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 867 if (ret < 0) { 868 mlx4_free_cmd_mailbox(dev, mailbox); 869 return -EINVAL; 870 } 871 size += ret; 872 } 873 874 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 875 if (ret == -ENOMEM) 876 mlx4_err_rule(dev, 877 "mcg table is full. Fail to register network rule.\n", 878 rule); 879 else if (ret) 880 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 881 882 mlx4_free_cmd_mailbox(dev, mailbox); 883 884 return ret; 885 } 886 EXPORT_SYMBOL_GPL(mlx4_flow_attach); 887 888 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) 889 { 890 int err; 891 892 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); 893 if (err) 894 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", 895 reg_id); 896 return err; 897 } 898 EXPORT_SYMBOL_GPL(mlx4_flow_detach); 899 900 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 901 int block_mcast_loopback, enum mlx4_protocol prot, 902 enum mlx4_steer_type steer) 903 { 904 struct mlx4_priv *priv = mlx4_priv(dev); 905 struct mlx4_cmd_mailbox *mailbox; 906 struct mlx4_mgm *mgm; 907 u32 members_count; 908 int index, prev; 909 int link = 0; 910 int i; 911 int err; 912 u8 port = gid[5]; 913 u8 new_entry = 0; 914 915 mailbox = mlx4_alloc_cmd_mailbox(dev); 916 if (IS_ERR(mailbox)) 917 return PTR_ERR(mailbox); 918 mgm = mailbox->buf; 919 920 mutex_lock(&priv->mcg_table.mutex); 921 err = find_entry(dev, port, gid, prot, 922 mailbox, &prev, &index); 923 if (err) 924 goto out; 925 926 if (index != -1) { 927 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 928 new_entry = 1; 929 memcpy(mgm->gid, gid, 16); 930 } 931 } else { 932 link = 1; 933 934 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); 935 if (index == -1) { 936 mlx4_err(dev, "No AMGM entries left\n"); 937 err = -ENOMEM; 938 goto out; 939 } 940 index += dev->caps.num_mgms; 941 942 new_entry = 1; 943 memset(mgm, 0, sizeof *mgm); 944 memcpy(mgm->gid, gid, 16); 945 } 946 947 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 948 if (members_count == dev->caps.num_qp_per_mgm) { 949 mlx4_err(dev, "MGM at index %x is full.\n", index); 950 err = -ENOMEM; 951 goto out; 952 } 953 954 for (i = 0; i < members_count; ++i) 955 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 956 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); 957 err = 0; 958 goto out; 959 } 960 961 if (block_mcast_loopback) 962 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | 963 (1U << MGM_BLCK_LB_BIT)); 964 else 965 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 966 967 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 968 969 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 970 if (err) 971 goto out; 972 973 if (!link) 974 goto out; 975 976 err = mlx4_READ_ENTRY(dev, prev, mailbox); 977 if (err) 978 goto out; 979 980 mgm->next_gid_index = cpu_to_be32(index << 6); 981 982 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 983 if (err) 984 goto out; 985 986 out: 987 if (prot == MLX4_PROT_ETH) { 988 /* manage the steering entry for promisc mode */ 989 if (new_entry) 990 new_steering_entry(dev, port, steer, index, qp->qpn); 991 else 992 existing_steering_entry(dev, port, steer, 993 index, qp->qpn); 994 } 995 if (err && link && index != -1) { 996 if (index < dev->caps.num_mgms) 997 mlx4_warn(dev, "Got AMGM index %d < %d", 998 index, dev->caps.num_mgms); 999 else 1000 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1001 index - dev->caps.num_mgms); 1002 } 1003 mutex_unlock(&priv->mcg_table.mutex); 1004 1005 mlx4_free_cmd_mailbox(dev, mailbox); 1006 return err; 1007 } 1008 1009 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1010 enum mlx4_protocol prot, enum mlx4_steer_type steer) 1011 { 1012 struct mlx4_priv *priv = mlx4_priv(dev); 1013 struct mlx4_cmd_mailbox *mailbox; 1014 struct mlx4_mgm *mgm; 1015 u32 members_count; 1016 int prev, index; 1017 int i, loc; 1018 int err; 1019 u8 port = gid[5]; 1020 bool removed_entry = false; 1021 1022 mailbox = mlx4_alloc_cmd_mailbox(dev); 1023 if (IS_ERR(mailbox)) 1024 return PTR_ERR(mailbox); 1025 mgm = mailbox->buf; 1026 1027 mutex_lock(&priv->mcg_table.mutex); 1028 1029 err = find_entry(dev, port, gid, prot, 1030 mailbox, &prev, &index); 1031 if (err) 1032 goto out; 1033 1034 if (index == -1) { 1035 mlx4_err(dev, "MGID %pI6 not found\n", gid); 1036 err = -EINVAL; 1037 goto out; 1038 } 1039 1040 /* if this pq is also a promisc qp, it shouldn't be removed */ 1041 if (prot == MLX4_PROT_ETH && 1042 check_duplicate_entry(dev, port, steer, index, qp->qpn)) 1043 goto out; 1044 1045 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1046 for (loc = -1, i = 0; i < members_count; ++i) 1047 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 1048 loc = i; 1049 1050 if (loc == -1) { 1051 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); 1052 err = -EINVAL; 1053 goto out; 1054 } 1055 1056 1057 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 1058 mgm->qp[loc] = mgm->qp[i - 1]; 1059 mgm->qp[i - 1] = 0; 1060 1061 if (prot == MLX4_PROT_ETH) 1062 removed_entry = can_remove_steering_entry(dev, port, steer, 1063 index, qp->qpn); 1064 if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { 1065 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1066 goto out; 1067 } 1068 1069 /* We are going to delete the entry, members count should be 0 */ 1070 mgm->members_count = cpu_to_be32((u32) prot << 30); 1071 1072 if (prev == -1) { 1073 /* Remove entry from MGM */ 1074 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1075 if (amgm_index) { 1076 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 1077 if (err) 1078 goto out; 1079 } else 1080 memset(mgm->gid, 0, 16); 1081 1082 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1083 if (err) 1084 goto out; 1085 1086 if (amgm_index) { 1087 if (amgm_index < dev->caps.num_mgms) 1088 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1089 index, amgm_index, dev->caps.num_mgms); 1090 else 1091 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1092 amgm_index - dev->caps.num_mgms); 1093 } 1094 } else { 1095 /* Remove entry from AMGM */ 1096 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1097 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1098 if (err) 1099 goto out; 1100 1101 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 1102 1103 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1104 if (err) 1105 goto out; 1106 1107 if (index < dev->caps.num_mgms) 1108 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1109 prev, index, dev->caps.num_mgms); 1110 else 1111 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1112 index - dev->caps.num_mgms); 1113 } 1114 1115 out: 1116 mutex_unlock(&priv->mcg_table.mutex); 1117 1118 mlx4_free_cmd_mailbox(dev, mailbox); 1119 return err; 1120 } 1121 1122 static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, 1123 u8 gid[16], u8 attach, u8 block_loopback, 1124 enum mlx4_protocol prot) 1125 { 1126 struct mlx4_cmd_mailbox *mailbox; 1127 int err = 0; 1128 int qpn; 1129 1130 if (!mlx4_is_mfunc(dev)) 1131 return -EBADF; 1132 1133 mailbox = mlx4_alloc_cmd_mailbox(dev); 1134 if (IS_ERR(mailbox)) 1135 return PTR_ERR(mailbox); 1136 1137 memcpy(mailbox->buf, gid, 16); 1138 qpn = qp->qpn; 1139 qpn |= (prot << 28); 1140 if (attach && block_loopback) 1141 qpn |= (1 << 31); 1142 1143 err = mlx4_cmd(dev, mailbox->dma, qpn, attach, 1144 MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, 1145 MLX4_CMD_WRAPPED); 1146 1147 mlx4_free_cmd_mailbox(dev, mailbox); 1148 return err; 1149 } 1150 1151 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1152 u8 gid[16], u8 port, 1153 int block_mcast_loopback, 1154 enum mlx4_protocol prot, u64 *reg_id) 1155 { 1156 struct mlx4_spec_list spec = { {NULL} }; 1157 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1158 1159 struct mlx4_net_trans_rule rule = { 1160 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1161 .exclusive = 0, 1162 .promisc_mode = MLX4_FS_REGULAR, 1163 .priority = MLX4_DOMAIN_NIC, 1164 }; 1165 1166 rule.allow_loopback = !block_mcast_loopback; 1167 rule.port = port; 1168 rule.qpn = qp->qpn; 1169 INIT_LIST_HEAD(&rule.list); 1170 1171 switch (prot) { 1172 case MLX4_PROT_ETH: 1173 spec.id = MLX4_NET_TRANS_RULE_ID_ETH; 1174 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); 1175 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 1176 break; 1177 1178 case MLX4_PROT_IB_IPV6: 1179 spec.id = MLX4_NET_TRANS_RULE_ID_IB; 1180 memcpy(spec.ib.dst_gid, gid, 16); 1181 memset(&spec.ib.dst_gid_msk, 0xff, 16); 1182 break; 1183 default: 1184 return -EINVAL; 1185 } 1186 list_add_tail(&spec.list, &rule.list); 1187 1188 return mlx4_flow_attach(dev, &rule, reg_id); 1189 } 1190 1191 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1192 u8 port, int block_mcast_loopback, 1193 enum mlx4_protocol prot, u64 *reg_id) 1194 { 1195 switch (dev->caps.steering_mode) { 1196 case MLX4_STEERING_MODE_A0: 1197 if (prot == MLX4_PROT_ETH) 1198 return 0; 1199 1200 case MLX4_STEERING_MODE_B0: 1201 if (prot == MLX4_PROT_ETH) 1202 gid[7] |= (MLX4_MC_STEER << 1); 1203 1204 if (mlx4_is_mfunc(dev)) 1205 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1206 block_mcast_loopback, prot); 1207 return mlx4_qp_attach_common(dev, qp, gid, 1208 block_mcast_loopback, prot, 1209 MLX4_MC_STEER); 1210 1211 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1212 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 1213 block_mcast_loopback, 1214 prot, reg_id); 1215 default: 1216 return -EINVAL; 1217 } 1218 } 1219 EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1220 1221 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1222 enum mlx4_protocol prot, u64 reg_id) 1223 { 1224 switch (dev->caps.steering_mode) { 1225 case MLX4_STEERING_MODE_A0: 1226 if (prot == MLX4_PROT_ETH) 1227 return 0; 1228 1229 case MLX4_STEERING_MODE_B0: 1230 if (prot == MLX4_PROT_ETH) 1231 gid[7] |= (MLX4_MC_STEER << 1); 1232 1233 if (mlx4_is_mfunc(dev)) 1234 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1235 1236 return mlx4_qp_detach_common(dev, qp, gid, prot, 1237 MLX4_MC_STEER); 1238 1239 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1240 return mlx4_flow_detach(dev, reg_id); 1241 1242 default: 1243 return -EINVAL; 1244 } 1245 } 1246 EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1247 1248 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1249 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1250 { 1251 struct mlx4_net_trans_rule rule; 1252 u64 *regid_p; 1253 1254 switch (mode) { 1255 case MLX4_FS_ALL_DEFAULT: 1256 regid_p = &dev->regid_promisc_array[port]; 1257 break; 1258 case MLX4_FS_MC_DEFAULT: 1259 regid_p = &dev->regid_allmulti_array[port]; 1260 break; 1261 default: 1262 return -1; 1263 } 1264 1265 if (*regid_p != 0) 1266 return -1; 1267 1268 rule.promisc_mode = mode; 1269 rule.port = port; 1270 rule.qpn = qpn; 1271 INIT_LIST_HEAD(&rule.list); 1272 mlx4_err(dev, "going promisc on %x\n", port); 1273 1274 return mlx4_flow_attach(dev, &rule, regid_p); 1275 } 1276 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); 1277 1278 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, 1279 enum mlx4_net_trans_promisc_mode mode) 1280 { 1281 int ret; 1282 u64 *regid_p; 1283 1284 switch (mode) { 1285 case MLX4_FS_ALL_DEFAULT: 1286 regid_p = &dev->regid_promisc_array[port]; 1287 break; 1288 case MLX4_FS_MC_DEFAULT: 1289 regid_p = &dev->regid_allmulti_array[port]; 1290 break; 1291 default: 1292 return -1; 1293 } 1294 1295 if (*regid_p == 0) 1296 return -1; 1297 1298 ret = mlx4_flow_detach(dev, *regid_p); 1299 if (ret == 0) 1300 *regid_p = 0; 1301 1302 return ret; 1303 } 1304 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); 1305 1306 int mlx4_unicast_attach(struct mlx4_dev *dev, 1307 struct mlx4_qp *qp, u8 gid[16], 1308 int block_mcast_loopback, enum mlx4_protocol prot) 1309 { 1310 if (prot == MLX4_PROT_ETH) 1311 gid[7] |= (MLX4_UC_STEER << 1); 1312 1313 if (mlx4_is_mfunc(dev)) 1314 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1315 block_mcast_loopback, prot); 1316 1317 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1318 prot, MLX4_UC_STEER); 1319 } 1320 EXPORT_SYMBOL_GPL(mlx4_unicast_attach); 1321 1322 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1323 u8 gid[16], enum mlx4_protocol prot) 1324 { 1325 if (prot == MLX4_PROT_ETH) 1326 gid[7] |= (MLX4_UC_STEER << 1); 1327 1328 if (mlx4_is_mfunc(dev)) 1329 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1330 1331 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); 1332 } 1333 EXPORT_SYMBOL_GPL(mlx4_unicast_detach); 1334 1335 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1336 struct mlx4_vhcr *vhcr, 1337 struct mlx4_cmd_mailbox *inbox, 1338 struct mlx4_cmd_mailbox *outbox, 1339 struct mlx4_cmd_info *cmd) 1340 { 1341 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1342 u8 port = vhcr->in_param >> 62; 1343 enum mlx4_steer_type steer = vhcr->in_modifier; 1344 1345 /* Promiscuous unicast is not allowed in mfunc */ 1346 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1347 return 0; 1348 1349 if (vhcr->op_modifier) 1350 return add_promisc_qp(dev, port, steer, qpn); 1351 else 1352 return remove_promisc_qp(dev, port, steer, qpn); 1353 } 1354 1355 static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, 1356 enum mlx4_steer_type steer, u8 add, u8 port) 1357 { 1358 return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, 1359 MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, 1360 MLX4_CMD_WRAPPED); 1361 } 1362 1363 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1364 { 1365 if (mlx4_is_mfunc(dev)) 1366 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1367 1368 return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1369 } 1370 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); 1371 1372 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1373 { 1374 if (mlx4_is_mfunc(dev)) 1375 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1376 1377 return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1378 } 1379 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); 1380 1381 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1382 { 1383 if (mlx4_is_mfunc(dev)) 1384 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1385 1386 return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1387 } 1388 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); 1389 1390 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1391 { 1392 if (mlx4_is_mfunc(dev)) 1393 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1394 1395 return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1396 } 1397 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); 1398 1399 int mlx4_init_mcg_table(struct mlx4_dev *dev) 1400 { 1401 struct mlx4_priv *priv = mlx4_priv(dev); 1402 int err; 1403 1404 /* No need for mcg_table when fw managed the mcg table*/ 1405 if (dev->caps.steering_mode == 1406 MLX4_STEERING_MODE_DEVICE_MANAGED) 1407 return 0; 1408 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1409 dev->caps.num_amgms - 1, 0, 0); 1410 if (err) 1411 return err; 1412 1413 mutex_init(&priv->mcg_table.mutex); 1414 1415 return 0; 1416 } 1417 1418 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1419 { 1420 if (dev->caps.steering_mode != 1421 MLX4_STEERING_MODE_DEVICE_MANAGED) 1422 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1423 } 1424