1 /* 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/string.h> 35 #include <linux/etherdevice.h> 36 37 #include <linux/mlx4/cmd.h> 38 #include <linux/export.h> 39 40 #include "mlx4.h" 41 42 #define MGM_QPN_MASK 0x00FFFFFF 43 #define MGM_BLCK_LB_BIT 30 44 45 static const u8 zero_gid[16]; /* automatically initialized to 0 */ 46 47 struct mlx4_mgm { 48 __be32 next_gid_index; 49 __be32 members_count; 50 u32 reserved[2]; 51 u8 gid[16]; 52 __be32 qp[MLX4_MAX_QP_PER_MGM]; 53 }; 54 55 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 56 { 57 if (dev->caps.steering_mode == 58 MLX4_STEERING_MODE_DEVICE_MANAGED) 59 return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE; 60 else 61 return min((1 << mlx4_log_num_mgm_entry_size), 62 MLX4_MAX_MGM_ENTRY_SIZE); 63 } 64 65 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 66 { 67 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 68 } 69 70 static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, 71 struct mlx4_cmd_mailbox *mailbox, 72 u32 size, 73 u64 *reg_id) 74 { 75 u64 imm; 76 int err = 0; 77 78 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, 79 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 80 MLX4_CMD_NATIVE); 81 if (err) 82 return err; 83 *reg_id = imm; 84 85 return err; 86 } 87 88 static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) 89 { 90 int err = 0; 91 92 err = mlx4_cmd(dev, regid, 0, 0, 93 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 94 MLX4_CMD_NATIVE); 95 96 return err; 97 } 98 99 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 100 struct mlx4_cmd_mailbox *mailbox) 101 { 102 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 103 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 104 } 105 106 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 107 struct mlx4_cmd_mailbox *mailbox) 108 { 109 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 110 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 111 } 112 113 static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, 114 struct mlx4_cmd_mailbox *mailbox) 115 { 116 u32 in_mod; 117 118 in_mod = (u32) port << 16 | steer << 1; 119 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 120 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, 121 MLX4_CMD_NATIVE); 122 } 123 124 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 125 u16 *hash, u8 op_mod) 126 { 127 u64 imm; 128 int err; 129 130 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 131 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, 132 MLX4_CMD_NATIVE); 133 134 if (!err) 135 *hash = imm; 136 137 return err; 138 } 139 140 static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, 141 enum mlx4_steer_type steer, 142 u32 qpn) 143 { 144 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; 145 struct mlx4_promisc_qp *pqp; 146 147 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 148 if (pqp->qpn == qpn) 149 return pqp; 150 } 151 /* not found */ 152 return NULL; 153 } 154 155 /* 156 * Add new entry to steering data structure. 157 * All promisc QPs should be added as well 158 */ 159 static int new_steering_entry(struct mlx4_dev *dev, u8 port, 160 enum mlx4_steer_type steer, 161 unsigned int index, u32 qpn) 162 { 163 struct mlx4_steer *s_steer; 164 struct mlx4_cmd_mailbox *mailbox; 165 struct mlx4_mgm *mgm; 166 u32 members_count; 167 struct mlx4_steer_index *new_entry; 168 struct mlx4_promisc_qp *pqp; 169 struct mlx4_promisc_qp *dqp = NULL; 170 u32 prot; 171 int err; 172 173 s_steer = &mlx4_priv(dev)->steer[port - 1]; 174 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 175 if (!new_entry) 176 return -ENOMEM; 177 178 INIT_LIST_HEAD(&new_entry->duplicates); 179 new_entry->index = index; 180 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 181 182 /* If the given qpn is also a promisc qp, 183 * it should be inserted to duplicates list 184 */ 185 pqp = get_promisc_qp(dev, 0, steer, qpn); 186 if (pqp) { 187 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 188 if (!dqp) { 189 err = -ENOMEM; 190 goto out_alloc; 191 } 192 dqp->qpn = qpn; 193 list_add_tail(&dqp->list, &new_entry->duplicates); 194 } 195 196 /* if no promisc qps for this vep, we are done */ 197 if (list_empty(&s_steer->promisc_qps[steer])) 198 return 0; 199 200 /* now need to add all the promisc qps to the new 201 * steering entry, as they should also receive the packets 202 * destined to this address */ 203 mailbox = mlx4_alloc_cmd_mailbox(dev); 204 if (IS_ERR(mailbox)) { 205 err = -ENOMEM; 206 goto out_alloc; 207 } 208 mgm = mailbox->buf; 209 210 err = mlx4_READ_ENTRY(dev, index, mailbox); 211 if (err) 212 goto out_mailbox; 213 214 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 215 prot = be32_to_cpu(mgm->members_count) >> 30; 216 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 217 /* don't add already existing qpn */ 218 if (pqp->qpn == qpn) 219 continue; 220 if (members_count == dev->caps.num_qp_per_mgm) { 221 /* out of space */ 222 err = -ENOMEM; 223 goto out_mailbox; 224 } 225 226 /* add the qpn */ 227 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 228 } 229 /* update the qps count and update the entry with all the promisc qps*/ 230 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 231 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 232 233 out_mailbox: 234 mlx4_free_cmd_mailbox(dev, mailbox); 235 if (!err) 236 return 0; 237 out_alloc: 238 if (dqp) { 239 list_del(&dqp->list); 240 kfree(dqp); 241 } 242 list_del(&new_entry->list); 243 kfree(new_entry); 244 return err; 245 } 246 247 /* update the data structures with existing steering entry */ 248 static int existing_steering_entry(struct mlx4_dev *dev, u8 port, 249 enum mlx4_steer_type steer, 250 unsigned int index, u32 qpn) 251 { 252 struct mlx4_steer *s_steer; 253 struct mlx4_steer_index *tmp_entry, *entry = NULL; 254 struct mlx4_promisc_qp *pqp; 255 struct mlx4_promisc_qp *dqp; 256 257 s_steer = &mlx4_priv(dev)->steer[port - 1]; 258 259 pqp = get_promisc_qp(dev, 0, steer, qpn); 260 if (!pqp) 261 return 0; /* nothing to do */ 262 263 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 264 if (tmp_entry->index == index) { 265 entry = tmp_entry; 266 break; 267 } 268 } 269 if (unlikely(!entry)) { 270 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 271 return -EINVAL; 272 } 273 274 /* the given qpn is listed as a promisc qpn 275 * we need to add it as a duplicate to this entry 276 * for future references */ 277 list_for_each_entry(dqp, &entry->duplicates, list) { 278 if (qpn == pqp->qpn) 279 return 0; /* qp is already duplicated */ 280 } 281 282 /* add the qp as a duplicate on this index */ 283 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 284 if (!dqp) 285 return -ENOMEM; 286 dqp->qpn = qpn; 287 list_add_tail(&dqp->list, &entry->duplicates); 288 289 return 0; 290 } 291 292 /* Check whether a qpn is a duplicate on steering entry 293 * If so, it should not be removed from mgm */ 294 static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, 295 enum mlx4_steer_type steer, 296 unsigned int index, u32 qpn) 297 { 298 struct mlx4_steer *s_steer; 299 struct mlx4_steer_index *tmp_entry, *entry = NULL; 300 struct mlx4_promisc_qp *dqp, *tmp_dqp; 301 302 s_steer = &mlx4_priv(dev)->steer[port - 1]; 303 304 /* if qp is not promisc, it cannot be duplicated */ 305 if (!get_promisc_qp(dev, 0, steer, qpn)) 306 return false; 307 308 /* The qp is promisc qp so it is a duplicate on this index 309 * Find the index entry, and remove the duplicate */ 310 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 311 if (tmp_entry->index == index) { 312 entry = tmp_entry; 313 break; 314 } 315 } 316 if (unlikely(!entry)) { 317 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 318 return false; 319 } 320 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 321 if (dqp->qpn == qpn) { 322 list_del(&dqp->list); 323 kfree(dqp); 324 } 325 } 326 return true; 327 } 328 329 /* I a steering entry contains only promisc QPs, it can be removed. */ 330 static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, 331 enum mlx4_steer_type steer, 332 unsigned int index, u32 tqpn) 333 { 334 struct mlx4_steer *s_steer; 335 struct mlx4_cmd_mailbox *mailbox; 336 struct mlx4_mgm *mgm; 337 struct mlx4_steer_index *entry = NULL, *tmp_entry; 338 u32 qpn; 339 u32 members_count; 340 bool ret = false; 341 int i; 342 343 s_steer = &mlx4_priv(dev)->steer[port - 1]; 344 345 mailbox = mlx4_alloc_cmd_mailbox(dev); 346 if (IS_ERR(mailbox)) 347 return false; 348 mgm = mailbox->buf; 349 350 if (mlx4_READ_ENTRY(dev, index, mailbox)) 351 goto out; 352 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 353 for (i = 0; i < members_count; i++) { 354 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 355 if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) { 356 /* the qp is not promisc, the entry can't be removed */ 357 goto out; 358 } 359 } 360 /* All the qps currently registered for this entry are promiscuous, 361 * Checking for duplicates */ 362 ret = true; 363 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 364 if (entry->index == index) { 365 if (list_empty(&entry->duplicates)) { 366 list_del(&entry->list); 367 kfree(entry); 368 } else { 369 /* This entry contains duplicates so it shouldn't be removed */ 370 ret = false; 371 goto out; 372 } 373 } 374 } 375 376 out: 377 mlx4_free_cmd_mailbox(dev, mailbox); 378 return ret; 379 } 380 381 static int add_promisc_qp(struct mlx4_dev *dev, u8 port, 382 enum mlx4_steer_type steer, u32 qpn) 383 { 384 struct mlx4_steer *s_steer; 385 struct mlx4_cmd_mailbox *mailbox; 386 struct mlx4_mgm *mgm; 387 struct mlx4_steer_index *entry; 388 struct mlx4_promisc_qp *pqp; 389 struct mlx4_promisc_qp *dqp; 390 u32 members_count; 391 u32 prot; 392 int i; 393 bool found; 394 int err; 395 struct mlx4_priv *priv = mlx4_priv(dev); 396 397 s_steer = &mlx4_priv(dev)->steer[port - 1]; 398 399 mutex_lock(&priv->mcg_table.mutex); 400 401 if (get_promisc_qp(dev, 0, steer, qpn)) { 402 err = 0; /* Noting to do, already exists */ 403 goto out_mutex; 404 } 405 406 pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 407 if (!pqp) { 408 err = -ENOMEM; 409 goto out_mutex; 410 } 411 pqp->qpn = qpn; 412 413 mailbox = mlx4_alloc_cmd_mailbox(dev); 414 if (IS_ERR(mailbox)) { 415 err = -ENOMEM; 416 goto out_alloc; 417 } 418 mgm = mailbox->buf; 419 420 /* the promisc qp needs to be added for each one of the steering 421 * entries, if it already exists, needs to be added as a duplicate 422 * for this entry */ 423 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 424 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 425 if (err) 426 goto out_mailbox; 427 428 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 429 prot = be32_to_cpu(mgm->members_count) >> 30; 430 found = false; 431 for (i = 0; i < members_count; i++) { 432 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 433 /* Entry already exists, add to duplicates */ 434 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 435 if (!dqp) { 436 err = -ENOMEM; 437 goto out_mailbox; 438 } 439 dqp->qpn = qpn; 440 list_add_tail(&dqp->list, &entry->duplicates); 441 found = true; 442 } 443 } 444 if (!found) { 445 /* Need to add the qpn to mgm */ 446 if (members_count == dev->caps.num_qp_per_mgm) { 447 /* entry is full */ 448 err = -ENOMEM; 449 goto out_mailbox; 450 } 451 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); 452 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 453 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 454 if (err) 455 goto out_mailbox; 456 } 457 } 458 459 /* add the new qpn to list of promisc qps */ 460 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 461 /* now need to add all the promisc qps to default entry */ 462 memset(mgm, 0, sizeof *mgm); 463 members_count = 0; 464 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 465 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 466 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 467 468 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 469 if (err) 470 goto out_list; 471 472 mlx4_free_cmd_mailbox(dev, mailbox); 473 mutex_unlock(&priv->mcg_table.mutex); 474 return 0; 475 476 out_list: 477 list_del(&pqp->list); 478 out_mailbox: 479 mlx4_free_cmd_mailbox(dev, mailbox); 480 out_alloc: 481 kfree(pqp); 482 out_mutex: 483 mutex_unlock(&priv->mcg_table.mutex); 484 return err; 485 } 486 487 static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, 488 enum mlx4_steer_type steer, u32 qpn) 489 { 490 struct mlx4_priv *priv = mlx4_priv(dev); 491 struct mlx4_steer *s_steer; 492 struct mlx4_cmd_mailbox *mailbox; 493 struct mlx4_mgm *mgm; 494 struct mlx4_steer_index *entry; 495 struct mlx4_promisc_qp *pqp; 496 struct mlx4_promisc_qp *dqp; 497 u32 members_count; 498 bool found; 499 bool back_to_list = false; 500 int loc, i; 501 int err; 502 503 s_steer = &mlx4_priv(dev)->steer[port - 1]; 504 mutex_lock(&priv->mcg_table.mutex); 505 506 pqp = get_promisc_qp(dev, 0, steer, qpn); 507 if (unlikely(!pqp)) { 508 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 509 /* nothing to do */ 510 err = 0; 511 goto out_mutex; 512 } 513 514 /*remove from list of promisc qps */ 515 list_del(&pqp->list); 516 517 /* set the default entry not to include the removed one */ 518 mailbox = mlx4_alloc_cmd_mailbox(dev); 519 if (IS_ERR(mailbox)) { 520 err = -ENOMEM; 521 back_to_list = true; 522 goto out_list; 523 } 524 mgm = mailbox->buf; 525 memset(mgm, 0, sizeof *mgm); 526 members_count = 0; 527 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 528 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 529 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 530 531 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 532 if (err) 533 goto out_mailbox; 534 535 /* remove the qp from all the steering entries*/ 536 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 537 found = false; 538 list_for_each_entry(dqp, &entry->duplicates, list) { 539 if (dqp->qpn == qpn) { 540 found = true; 541 break; 542 } 543 } 544 if (found) { 545 /* a duplicate, no need to change the mgm, 546 * only update the duplicates list */ 547 list_del(&dqp->list); 548 kfree(dqp); 549 } else { 550 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 551 if (err) 552 goto out_mailbox; 553 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 554 for (loc = -1, i = 0; i < members_count; ++i) 555 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) 556 loc = i; 557 558 mgm->members_count = cpu_to_be32(--members_count | 559 (MLX4_PROT_ETH << 30)); 560 mgm->qp[loc] = mgm->qp[i - 1]; 561 mgm->qp[i - 1] = 0; 562 563 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 564 if (err) 565 goto out_mailbox; 566 } 567 568 } 569 570 out_mailbox: 571 mlx4_free_cmd_mailbox(dev, mailbox); 572 out_list: 573 if (back_to_list) 574 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 575 else 576 kfree(pqp); 577 out_mutex: 578 mutex_unlock(&priv->mcg_table.mutex); 579 return err; 580 } 581 582 /* 583 * Caller must hold MCG table semaphore. gid and mgm parameters must 584 * be properly aligned for command interface. 585 * 586 * Returns 0 unless a firmware command error occurs. 587 * 588 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 589 * and *mgm holds MGM entry. 590 * 591 * if GID is found in AMGM, *index = index in AMGM, *prev = index of 592 * previous entry in hash chain and *mgm holds AMGM entry. 593 * 594 * If no AMGM exists for given gid, *index = -1, *prev = index of last 595 * entry in hash chain and *mgm holds end of hash chain. 596 */ 597 static int find_entry(struct mlx4_dev *dev, u8 port, 598 u8 *gid, enum mlx4_protocol prot, 599 struct mlx4_cmd_mailbox *mgm_mailbox, 600 int *prev, int *index) 601 { 602 struct mlx4_cmd_mailbox *mailbox; 603 struct mlx4_mgm *mgm = mgm_mailbox->buf; 604 u8 *mgid; 605 int err; 606 u16 hash; 607 u8 op_mod = (prot == MLX4_PROT_ETH) ? 608 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; 609 610 mailbox = mlx4_alloc_cmd_mailbox(dev); 611 if (IS_ERR(mailbox)) 612 return -ENOMEM; 613 mgid = mailbox->buf; 614 615 memcpy(mgid, gid, 16); 616 617 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); 618 mlx4_free_cmd_mailbox(dev, mailbox); 619 if (err) 620 return err; 621 622 if (0) 623 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); 624 625 *index = hash; 626 *prev = -1; 627 628 do { 629 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 630 if (err) 631 return err; 632 633 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 634 if (*index != hash) { 635 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 636 err = -EINVAL; 637 } 638 return err; 639 } 640 641 if (!memcmp(mgm->gid, gid, 16) && 642 be32_to_cpu(mgm->members_count) >> 30 == prot) 643 return err; 644 645 *prev = *index; 646 *index = be32_to_cpu(mgm->next_gid_index) >> 6; 647 } while (*index); 648 649 *index = -1; 650 return err; 651 } 652 653 struct mlx4_net_trans_rule_hw_ctrl { 654 __be32 ctrl; 655 __be32 vf_vep_port; 656 __be32 qpn; 657 __be32 reserved; 658 }; 659 660 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, 661 struct mlx4_net_trans_rule_hw_ctrl *hw) 662 { 663 static const u8 __promisc_mode[] = { 664 [MLX4_FS_PROMISC_NONE] = 0x0, 665 [MLX4_FS_PROMISC_UPLINK] = 0x1, 666 [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2, 667 [MLX4_FS_PROMISC_ALL_MULTI] = 0x3, 668 }; 669 670 u32 dw = 0; 671 672 dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; 673 dw |= ctrl->exclusive ? (1 << 2) : 0; 674 dw |= ctrl->allow_loopback ? (1 << 3) : 0; 675 dw |= __promisc_mode[ctrl->promisc_mode] << 8; 676 dw |= ctrl->priority << 16; 677 678 hw->ctrl = cpu_to_be32(dw); 679 hw->vf_vep_port = cpu_to_be32(ctrl->port); 680 hw->qpn = cpu_to_be32(ctrl->qpn); 681 } 682 683 struct mlx4_net_trans_rule_hw_ib { 684 u8 size; 685 u8 rsvd1; 686 __be16 id; 687 u32 rsvd2; 688 __be32 qpn; 689 __be32 qpn_mask; 690 u8 dst_gid[16]; 691 u8 dst_gid_msk[16]; 692 } __packed; 693 694 struct mlx4_net_trans_rule_hw_eth { 695 u8 size; 696 u8 rsvd; 697 __be16 id; 698 u8 rsvd1[6]; 699 u8 dst_mac[6]; 700 u16 rsvd2; 701 u8 dst_mac_msk[6]; 702 u16 rsvd3; 703 u8 src_mac[6]; 704 u16 rsvd4; 705 u8 src_mac_msk[6]; 706 u8 rsvd5; 707 u8 ether_type_enable; 708 __be16 ether_type; 709 __be16 vlan_id_msk; 710 __be16 vlan_id; 711 } __packed; 712 713 struct mlx4_net_trans_rule_hw_tcp_udp { 714 u8 size; 715 u8 rsvd; 716 __be16 id; 717 __be16 rsvd1[3]; 718 __be16 dst_port; 719 __be16 rsvd2; 720 __be16 dst_port_msk; 721 __be16 rsvd3; 722 __be16 src_port; 723 __be16 rsvd4; 724 __be16 src_port_msk; 725 } __packed; 726 727 struct mlx4_net_trans_rule_hw_ipv4 { 728 u8 size; 729 u8 rsvd; 730 __be16 id; 731 __be32 rsvd1; 732 __be32 dst_ip; 733 __be32 dst_ip_msk; 734 __be32 src_ip; 735 __be32 src_ip_msk; 736 } __packed; 737 738 struct _rule_hw { 739 union { 740 struct { 741 u8 size; 742 u8 rsvd; 743 __be16 id; 744 }; 745 struct mlx4_net_trans_rule_hw_eth eth; 746 struct mlx4_net_trans_rule_hw_ib ib; 747 struct mlx4_net_trans_rule_hw_ipv4 ipv4; 748 struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; 749 }; 750 }; 751 752 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, 753 struct _rule_hw *rule_hw) 754 { 755 static const u16 __sw_id_hw[] = { 756 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, 757 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, 758 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, 759 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, 760 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, 761 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 762 }; 763 764 static const size_t __rule_hw_sz[] = { 765 [MLX4_NET_TRANS_RULE_ID_ETH] = 766 sizeof(struct mlx4_net_trans_rule_hw_eth), 767 [MLX4_NET_TRANS_RULE_ID_IB] = 768 sizeof(struct mlx4_net_trans_rule_hw_ib), 769 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, 770 [MLX4_NET_TRANS_RULE_ID_IPV4] = 771 sizeof(struct mlx4_net_trans_rule_hw_ipv4), 772 [MLX4_NET_TRANS_RULE_ID_TCP] = 773 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 774 [MLX4_NET_TRANS_RULE_ID_UDP] = 775 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) 776 }; 777 if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { 778 mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id); 779 return -EINVAL; 780 } 781 memset(rule_hw, 0, __rule_hw_sz[spec->id]); 782 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); 783 rule_hw->size = __rule_hw_sz[spec->id] >> 2; 784 785 switch (spec->id) { 786 case MLX4_NET_TRANS_RULE_ID_ETH: 787 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); 788 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, 789 ETH_ALEN); 790 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); 791 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, 792 ETH_ALEN); 793 if (spec->eth.ether_type_enable) { 794 rule_hw->eth.ether_type_enable = 1; 795 rule_hw->eth.ether_type = spec->eth.ether_type; 796 } 797 rule_hw->eth.vlan_id = spec->eth.vlan_id; 798 rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; 799 break; 800 801 case MLX4_NET_TRANS_RULE_ID_IB: 802 rule_hw->ib.qpn = spec->ib.r_qpn; 803 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; 804 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); 805 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); 806 break; 807 808 case MLX4_NET_TRANS_RULE_ID_IPV6: 809 return -EOPNOTSUPP; 810 811 case MLX4_NET_TRANS_RULE_ID_IPV4: 812 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; 813 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; 814 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; 815 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; 816 break; 817 818 case MLX4_NET_TRANS_RULE_ID_TCP: 819 case MLX4_NET_TRANS_RULE_ID_UDP: 820 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; 821 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; 822 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; 823 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; 824 break; 825 826 default: 827 return -EINVAL; 828 } 829 830 return __rule_hw_sz[spec->id]; 831 } 832 833 static void mlx4_err_rule(struct mlx4_dev *dev, char *str, 834 struct mlx4_net_trans_rule *rule) 835 { 836 #define BUF_SIZE 256 837 struct mlx4_spec_list *cur; 838 char buf[BUF_SIZE]; 839 int len = 0; 840 841 mlx4_err(dev, "%s", str); 842 len += snprintf(buf + len, BUF_SIZE - len, 843 "port = %d prio = 0x%x qp = 0x%x ", 844 rule->port, rule->priority, rule->qpn); 845 846 list_for_each_entry(cur, &rule->list, list) { 847 switch (cur->id) { 848 case MLX4_NET_TRANS_RULE_ID_ETH: 849 len += snprintf(buf + len, BUF_SIZE - len, 850 "dmac = %pM ", &cur->eth.dst_mac); 851 if (cur->eth.ether_type) 852 len += snprintf(buf + len, BUF_SIZE - len, 853 "ethertype = 0x%x ", 854 be16_to_cpu(cur->eth.ether_type)); 855 if (cur->eth.vlan_id) 856 len += snprintf(buf + len, BUF_SIZE - len, 857 "vlan-id = %d ", 858 be16_to_cpu(cur->eth.vlan_id)); 859 break; 860 861 case MLX4_NET_TRANS_RULE_ID_IPV4: 862 if (cur->ipv4.src_ip) 863 len += snprintf(buf + len, BUF_SIZE - len, 864 "src-ip = %pI4 ", 865 &cur->ipv4.src_ip); 866 if (cur->ipv4.dst_ip) 867 len += snprintf(buf + len, BUF_SIZE - len, 868 "dst-ip = %pI4 ", 869 &cur->ipv4.dst_ip); 870 break; 871 872 case MLX4_NET_TRANS_RULE_ID_TCP: 873 case MLX4_NET_TRANS_RULE_ID_UDP: 874 if (cur->tcp_udp.src_port) 875 len += snprintf(buf + len, BUF_SIZE - len, 876 "src-port = %d ", 877 be16_to_cpu(cur->tcp_udp.src_port)); 878 if (cur->tcp_udp.dst_port) 879 len += snprintf(buf + len, BUF_SIZE - len, 880 "dst-port = %d ", 881 be16_to_cpu(cur->tcp_udp.dst_port)); 882 break; 883 884 case MLX4_NET_TRANS_RULE_ID_IB: 885 len += snprintf(buf + len, BUF_SIZE - len, 886 "dst-gid = %pI6\n", cur->ib.dst_gid); 887 len += snprintf(buf + len, BUF_SIZE - len, 888 "dst-gid-mask = %pI6\n", 889 cur->ib.dst_gid_msk); 890 break; 891 892 case MLX4_NET_TRANS_RULE_ID_IPV6: 893 break; 894 895 default: 896 break; 897 } 898 } 899 len += snprintf(buf + len, BUF_SIZE - len, "\n"); 900 mlx4_err(dev, "%s", buf); 901 902 if (len >= BUF_SIZE) 903 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 904 } 905 906 int mlx4_flow_attach(struct mlx4_dev *dev, 907 struct mlx4_net_trans_rule *rule, u64 *reg_id) 908 { 909 struct mlx4_cmd_mailbox *mailbox; 910 struct mlx4_spec_list *cur; 911 u32 size = 0; 912 int ret; 913 914 mailbox = mlx4_alloc_cmd_mailbox(dev); 915 if (IS_ERR(mailbox)) 916 return PTR_ERR(mailbox); 917 918 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); 919 trans_rule_ctrl_to_hw(rule, mailbox->buf); 920 921 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 922 923 list_for_each_entry(cur, &rule->list, list) { 924 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 925 if (ret < 0) { 926 mlx4_free_cmd_mailbox(dev, mailbox); 927 return -EINVAL; 928 } 929 size += ret; 930 } 931 932 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 933 if (ret == -ENOMEM) 934 mlx4_err_rule(dev, 935 "mcg table is full. Fail to register network rule.\n", 936 rule); 937 else if (ret) 938 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 939 940 mlx4_free_cmd_mailbox(dev, mailbox); 941 942 return ret; 943 } 944 EXPORT_SYMBOL_GPL(mlx4_flow_attach); 945 946 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) 947 { 948 int err; 949 950 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); 951 if (err) 952 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", 953 reg_id); 954 return err; 955 } 956 EXPORT_SYMBOL_GPL(mlx4_flow_detach); 957 958 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 959 int block_mcast_loopback, enum mlx4_protocol prot, 960 enum mlx4_steer_type steer) 961 { 962 struct mlx4_priv *priv = mlx4_priv(dev); 963 struct mlx4_cmd_mailbox *mailbox; 964 struct mlx4_mgm *mgm; 965 u32 members_count; 966 int index, prev; 967 int link = 0; 968 int i; 969 int err; 970 u8 port = gid[5]; 971 u8 new_entry = 0; 972 973 mailbox = mlx4_alloc_cmd_mailbox(dev); 974 if (IS_ERR(mailbox)) 975 return PTR_ERR(mailbox); 976 mgm = mailbox->buf; 977 978 mutex_lock(&priv->mcg_table.mutex); 979 err = find_entry(dev, port, gid, prot, 980 mailbox, &prev, &index); 981 if (err) 982 goto out; 983 984 if (index != -1) { 985 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 986 new_entry = 1; 987 memcpy(mgm->gid, gid, 16); 988 } 989 } else { 990 link = 1; 991 992 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); 993 if (index == -1) { 994 mlx4_err(dev, "No AMGM entries left\n"); 995 err = -ENOMEM; 996 goto out; 997 } 998 index += dev->caps.num_mgms; 999 1000 new_entry = 1; 1001 memset(mgm, 0, sizeof *mgm); 1002 memcpy(mgm->gid, gid, 16); 1003 } 1004 1005 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1006 if (members_count == dev->caps.num_qp_per_mgm) { 1007 mlx4_err(dev, "MGM at index %x is full.\n", index); 1008 err = -ENOMEM; 1009 goto out; 1010 } 1011 1012 for (i = 0; i < members_count; ++i) 1013 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 1014 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); 1015 err = 0; 1016 goto out; 1017 } 1018 1019 if (block_mcast_loopback) 1020 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | 1021 (1U << MGM_BLCK_LB_BIT)); 1022 else 1023 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 1024 1025 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 1026 1027 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1028 if (err) 1029 goto out; 1030 1031 if (!link) 1032 goto out; 1033 1034 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1035 if (err) 1036 goto out; 1037 1038 mgm->next_gid_index = cpu_to_be32(index << 6); 1039 1040 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1041 if (err) 1042 goto out; 1043 1044 out: 1045 if (prot == MLX4_PROT_ETH) { 1046 /* manage the steering entry for promisc mode */ 1047 if (new_entry) 1048 new_steering_entry(dev, port, steer, index, qp->qpn); 1049 else 1050 existing_steering_entry(dev, port, steer, 1051 index, qp->qpn); 1052 } 1053 if (err && link && index != -1) { 1054 if (index < dev->caps.num_mgms) 1055 mlx4_warn(dev, "Got AMGM index %d < %d", 1056 index, dev->caps.num_mgms); 1057 else 1058 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1059 index - dev->caps.num_mgms); 1060 } 1061 mutex_unlock(&priv->mcg_table.mutex); 1062 1063 mlx4_free_cmd_mailbox(dev, mailbox); 1064 return err; 1065 } 1066 1067 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1068 enum mlx4_protocol prot, enum mlx4_steer_type steer) 1069 { 1070 struct mlx4_priv *priv = mlx4_priv(dev); 1071 struct mlx4_cmd_mailbox *mailbox; 1072 struct mlx4_mgm *mgm; 1073 u32 members_count; 1074 int prev, index; 1075 int i, loc; 1076 int err; 1077 u8 port = gid[5]; 1078 bool removed_entry = false; 1079 1080 mailbox = mlx4_alloc_cmd_mailbox(dev); 1081 if (IS_ERR(mailbox)) 1082 return PTR_ERR(mailbox); 1083 mgm = mailbox->buf; 1084 1085 mutex_lock(&priv->mcg_table.mutex); 1086 1087 err = find_entry(dev, port, gid, prot, 1088 mailbox, &prev, &index); 1089 if (err) 1090 goto out; 1091 1092 if (index == -1) { 1093 mlx4_err(dev, "MGID %pI6 not found\n", gid); 1094 err = -EINVAL; 1095 goto out; 1096 } 1097 1098 /* if this pq is also a promisc qp, it shouldn't be removed */ 1099 if (prot == MLX4_PROT_ETH && 1100 check_duplicate_entry(dev, port, steer, index, qp->qpn)) 1101 goto out; 1102 1103 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1104 for (loc = -1, i = 0; i < members_count; ++i) 1105 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 1106 loc = i; 1107 1108 if (loc == -1) { 1109 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); 1110 err = -EINVAL; 1111 goto out; 1112 } 1113 1114 1115 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 1116 mgm->qp[loc] = mgm->qp[i - 1]; 1117 mgm->qp[i - 1] = 0; 1118 1119 if (prot == MLX4_PROT_ETH) 1120 removed_entry = can_remove_steering_entry(dev, port, steer, 1121 index, qp->qpn); 1122 if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { 1123 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1124 goto out; 1125 } 1126 1127 /* We are going to delete the entry, members count should be 0 */ 1128 mgm->members_count = cpu_to_be32((u32) prot << 30); 1129 1130 if (prev == -1) { 1131 /* Remove entry from MGM */ 1132 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1133 if (amgm_index) { 1134 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 1135 if (err) 1136 goto out; 1137 } else 1138 memset(mgm->gid, 0, 16); 1139 1140 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1141 if (err) 1142 goto out; 1143 1144 if (amgm_index) { 1145 if (amgm_index < dev->caps.num_mgms) 1146 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1147 index, amgm_index, dev->caps.num_mgms); 1148 else 1149 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1150 amgm_index - dev->caps.num_mgms); 1151 } 1152 } else { 1153 /* Remove entry from AMGM */ 1154 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1155 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1156 if (err) 1157 goto out; 1158 1159 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 1160 1161 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1162 if (err) 1163 goto out; 1164 1165 if (index < dev->caps.num_mgms) 1166 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1167 prev, index, dev->caps.num_mgms); 1168 else 1169 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1170 index - dev->caps.num_mgms); 1171 } 1172 1173 out: 1174 mutex_unlock(&priv->mcg_table.mutex); 1175 1176 mlx4_free_cmd_mailbox(dev, mailbox); 1177 return err; 1178 } 1179 1180 static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, 1181 u8 gid[16], u8 attach, u8 block_loopback, 1182 enum mlx4_protocol prot) 1183 { 1184 struct mlx4_cmd_mailbox *mailbox; 1185 int err = 0; 1186 int qpn; 1187 1188 if (!mlx4_is_mfunc(dev)) 1189 return -EBADF; 1190 1191 mailbox = mlx4_alloc_cmd_mailbox(dev); 1192 if (IS_ERR(mailbox)) 1193 return PTR_ERR(mailbox); 1194 1195 memcpy(mailbox->buf, gid, 16); 1196 qpn = qp->qpn; 1197 qpn |= (prot << 28); 1198 if (attach && block_loopback) 1199 qpn |= (1 << 31); 1200 1201 err = mlx4_cmd(dev, mailbox->dma, qpn, attach, 1202 MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, 1203 MLX4_CMD_WRAPPED); 1204 1205 mlx4_free_cmd_mailbox(dev, mailbox); 1206 return err; 1207 } 1208 1209 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1210 u8 port, int block_mcast_loopback, 1211 enum mlx4_protocol prot, u64 *reg_id) 1212 { 1213 1214 switch (dev->caps.steering_mode) { 1215 case MLX4_STEERING_MODE_A0: 1216 if (prot == MLX4_PROT_ETH) 1217 return 0; 1218 1219 case MLX4_STEERING_MODE_B0: 1220 if (prot == MLX4_PROT_ETH) 1221 gid[7] |= (MLX4_MC_STEER << 1); 1222 1223 if (mlx4_is_mfunc(dev)) 1224 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1225 block_mcast_loopback, prot); 1226 return mlx4_qp_attach_common(dev, qp, gid, 1227 block_mcast_loopback, prot, 1228 MLX4_MC_STEER); 1229 1230 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 1231 struct mlx4_spec_list spec = { {NULL} }; 1232 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1233 1234 struct mlx4_net_trans_rule rule = { 1235 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1236 .exclusive = 0, 1237 .promisc_mode = MLX4_FS_PROMISC_NONE, 1238 .priority = MLX4_DOMAIN_NIC, 1239 }; 1240 1241 rule.allow_loopback = ~block_mcast_loopback; 1242 rule.port = port; 1243 rule.qpn = qp->qpn; 1244 INIT_LIST_HEAD(&rule.list); 1245 1246 switch (prot) { 1247 case MLX4_PROT_ETH: 1248 spec.id = MLX4_NET_TRANS_RULE_ID_ETH; 1249 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); 1250 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 1251 break; 1252 1253 case MLX4_PROT_IB_IPV6: 1254 spec.id = MLX4_NET_TRANS_RULE_ID_IB; 1255 memcpy(spec.ib.dst_gid, gid, 16); 1256 memset(&spec.ib.dst_gid_msk, 0xff, 16); 1257 break; 1258 default: 1259 return -EINVAL; 1260 } 1261 list_add_tail(&spec.list, &rule.list); 1262 1263 return mlx4_flow_attach(dev, &rule, reg_id); 1264 } 1265 1266 default: 1267 return -EINVAL; 1268 } 1269 } 1270 EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1271 1272 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1273 enum mlx4_protocol prot, u64 reg_id) 1274 { 1275 switch (dev->caps.steering_mode) { 1276 case MLX4_STEERING_MODE_A0: 1277 if (prot == MLX4_PROT_ETH) 1278 return 0; 1279 1280 case MLX4_STEERING_MODE_B0: 1281 if (prot == MLX4_PROT_ETH) 1282 gid[7] |= (MLX4_MC_STEER << 1); 1283 1284 if (mlx4_is_mfunc(dev)) 1285 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1286 1287 return mlx4_qp_detach_common(dev, qp, gid, prot, 1288 MLX4_MC_STEER); 1289 1290 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1291 return mlx4_flow_detach(dev, reg_id); 1292 1293 default: 1294 return -EINVAL; 1295 } 1296 } 1297 EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1298 1299 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1300 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1301 { 1302 struct mlx4_net_trans_rule rule; 1303 u64 *regid_p; 1304 1305 switch (mode) { 1306 case MLX4_FS_PROMISC_UPLINK: 1307 case MLX4_FS_PROMISC_FUNCTION_PORT: 1308 regid_p = &dev->regid_promisc_array[port]; 1309 break; 1310 case MLX4_FS_PROMISC_ALL_MULTI: 1311 regid_p = &dev->regid_allmulti_array[port]; 1312 break; 1313 default: 1314 return -1; 1315 } 1316 1317 if (*regid_p != 0) 1318 return -1; 1319 1320 rule.promisc_mode = mode; 1321 rule.port = port; 1322 rule.qpn = qpn; 1323 INIT_LIST_HEAD(&rule.list); 1324 mlx4_err(dev, "going promisc on %x\n", port); 1325 1326 return mlx4_flow_attach(dev, &rule, regid_p); 1327 } 1328 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); 1329 1330 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, 1331 enum mlx4_net_trans_promisc_mode mode) 1332 { 1333 int ret; 1334 u64 *regid_p; 1335 1336 switch (mode) { 1337 case MLX4_FS_PROMISC_UPLINK: 1338 case MLX4_FS_PROMISC_FUNCTION_PORT: 1339 regid_p = &dev->regid_promisc_array[port]; 1340 break; 1341 case MLX4_FS_PROMISC_ALL_MULTI: 1342 regid_p = &dev->regid_allmulti_array[port]; 1343 break; 1344 default: 1345 return -1; 1346 } 1347 1348 if (*regid_p == 0) 1349 return -1; 1350 1351 ret = mlx4_flow_detach(dev, *regid_p); 1352 if (ret == 0) 1353 *regid_p = 0; 1354 1355 return ret; 1356 } 1357 EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); 1358 1359 int mlx4_unicast_attach(struct mlx4_dev *dev, 1360 struct mlx4_qp *qp, u8 gid[16], 1361 int block_mcast_loopback, enum mlx4_protocol prot) 1362 { 1363 if (prot == MLX4_PROT_ETH) 1364 gid[7] |= (MLX4_UC_STEER << 1); 1365 1366 if (mlx4_is_mfunc(dev)) 1367 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1368 block_mcast_loopback, prot); 1369 1370 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1371 prot, MLX4_UC_STEER); 1372 } 1373 EXPORT_SYMBOL_GPL(mlx4_unicast_attach); 1374 1375 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1376 u8 gid[16], enum mlx4_protocol prot) 1377 { 1378 if (prot == MLX4_PROT_ETH) 1379 gid[7] |= (MLX4_UC_STEER << 1); 1380 1381 if (mlx4_is_mfunc(dev)) 1382 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1383 1384 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); 1385 } 1386 EXPORT_SYMBOL_GPL(mlx4_unicast_detach); 1387 1388 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1389 struct mlx4_vhcr *vhcr, 1390 struct mlx4_cmd_mailbox *inbox, 1391 struct mlx4_cmd_mailbox *outbox, 1392 struct mlx4_cmd_info *cmd) 1393 { 1394 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1395 u8 port = vhcr->in_param >> 62; 1396 enum mlx4_steer_type steer = vhcr->in_modifier; 1397 1398 /* Promiscuous unicast is not allowed in mfunc */ 1399 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1400 return 0; 1401 1402 if (vhcr->op_modifier) 1403 return add_promisc_qp(dev, port, steer, qpn); 1404 else 1405 return remove_promisc_qp(dev, port, steer, qpn); 1406 } 1407 1408 static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, 1409 enum mlx4_steer_type steer, u8 add, u8 port) 1410 { 1411 return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, 1412 MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, 1413 MLX4_CMD_WRAPPED); 1414 } 1415 1416 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1417 { 1418 if (mlx4_is_mfunc(dev)) 1419 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1420 1421 return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1422 } 1423 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); 1424 1425 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1426 { 1427 if (mlx4_is_mfunc(dev)) 1428 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1429 1430 return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1431 } 1432 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); 1433 1434 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1435 { 1436 if (mlx4_is_mfunc(dev)) 1437 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1438 1439 return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1440 } 1441 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); 1442 1443 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1444 { 1445 if (mlx4_is_mfunc(dev)) 1446 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1447 1448 return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1449 } 1450 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); 1451 1452 int mlx4_init_mcg_table(struct mlx4_dev *dev) 1453 { 1454 struct mlx4_priv *priv = mlx4_priv(dev); 1455 int err; 1456 1457 /* No need for mcg_table when fw managed the mcg table*/ 1458 if (dev->caps.steering_mode == 1459 MLX4_STEERING_MODE_DEVICE_MANAGED) 1460 return 0; 1461 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1462 dev->caps.num_amgms - 1, 0, 0); 1463 if (err) 1464 return err; 1465 1466 mutex_init(&priv->mcg_table.mutex); 1467 1468 return 0; 1469 } 1470 1471 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1472 { 1473 if (dev->caps.steering_mode != 1474 MLX4_STEERING_MODE_DEVICE_MANAGED) 1475 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1476 } 1477