/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 */ 32 33 #include <linux/errno.h> 34 #include <linux/if_ether.h> 35 #include <linux/if_vlan.h> 36 #include <linux/export.h> 37 38 #include <linux/mlx4/cmd.h> 39 40 #include "mlx4.h" 41 42 #define MLX4_MAC_VALID (1ull << 63) 43 44 #define MLX4_VLAN_VALID (1u << 31) 45 #define MLX4_VLAN_MASK 0xfff 46 47 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL 48 #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL 49 #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL 50 #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL 51 52 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) 53 { 54 int i; 55 56 mutex_init(&table->mutex); 57 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { 58 table->entries[i] = 0; 59 table->refs[i] = 0; 60 } 61 table->max = 1 << dev->caps.log_num_macs; 62 table->total = 0; 63 } 64 65 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) 66 { 67 int i; 68 69 mutex_init(&table->mutex); 70 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { 71 table->entries[i] = 0; 72 table->refs[i] = 0; 73 } 74 table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; 75 table->total = 0; 76 } 77 78 void mlx4_init_roce_gid_table(struct mlx4_dev *dev, 79 struct mlx4_roce_gid_table *table) 80 { 81 int i; 82 83 mutex_init(&table->mutex); 84 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) 85 memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE); 86 } 87 88 static int validate_index(struct mlx4_dev *dev, 89 struct mlx4_mac_table *table, int index) 90 { 91 int err = 0; 92 93 if (index < 0 || index >= table->max || !table->entries[index]) { 94 mlx4_warn(dev, "No valid Mac entry for the given index\n"); 95 err = -EINVAL; 96 } 97 return err; 98 } 99 100 static int find_index(struct mlx4_dev *dev, 101 struct mlx4_mac_table *table, u64 mac) 102 { 103 int i; 104 105 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { 106 if ((mac & MLX4_MAC_MASK) == 107 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) 108 return i; 109 } 110 /* Mac not found */ 111 return 
-EINVAL; 112 } 113 114 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, 115 __be64 *entries) 116 { 117 struct mlx4_cmd_mailbox *mailbox; 118 u32 in_mod; 119 int err; 120 121 mailbox = mlx4_alloc_cmd_mailbox(dev); 122 if (IS_ERR(mailbox)) 123 return PTR_ERR(mailbox); 124 125 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); 126 127 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; 128 129 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 130 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 131 132 mlx4_free_cmd_mailbox(dev, mailbox); 133 return err; 134 } 135 136 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx) 137 { 138 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 139 struct mlx4_mac_table *table = &info->mac_table; 140 int i; 141 142 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { 143 if (!table->refs[i]) 144 continue; 145 146 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 147 *idx = i; 148 return 0; 149 } 150 } 151 152 return -ENOENT; 153 } 154 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac); 155 156 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 157 { 158 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 159 struct mlx4_mac_table *table = &info->mac_table; 160 int i, err = 0; 161 int free = -1; 162 163 mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", 164 (unsigned long long) mac, port); 165 166 mutex_lock(&table->mutex); 167 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { 168 if (free < 0 && !table->entries[i]) { 169 free = i; 170 continue; 171 } 172 173 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 174 /* MAC already registered, increment ref count */ 175 err = i; 176 ++table->refs[i]; 177 goto out; 178 } 179 } 180 181 mlx4_dbg(dev, "Free MAC index is %d\n", free); 182 183 if (table->total == table->max) { 184 /* No free mac entries */ 185 err = -ENOSPC; 186 goto out; 187 } 188 189 /* Register new MAC */ 190 table->entries[free] = cpu_to_be64(mac | 
MLX4_MAC_VALID); 191 192 err = mlx4_set_port_mac_table(dev, port, table->entries); 193 if (unlikely(err)) { 194 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", 195 (unsigned long long) mac); 196 table->entries[free] = 0; 197 goto out; 198 } 199 table->refs[free] = 1; 200 err = free; 201 ++table->total; 202 out: 203 mutex_unlock(&table->mutex); 204 return err; 205 } 206 EXPORT_SYMBOL_GPL(__mlx4_register_mac); 207 208 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 209 { 210 u64 out_param = 0; 211 int err = -EINVAL; 212 213 if (mlx4_is_mfunc(dev)) { 214 if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { 215 err = mlx4_cmd_imm(dev, mac, &out_param, 216 ((u32) port) << 8 | (u32) RES_MAC, 217 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 218 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 219 } 220 if (err && err == -EINVAL && mlx4_is_slave(dev)) { 221 /* retry using old REG_MAC format */ 222 set_param_l(&out_param, port); 223 err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 224 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 225 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 226 if (!err) 227 dev->flags |= MLX4_FLAG_OLD_REG_MAC; 228 } 229 if (err) 230 return err; 231 232 return get_param_l(&out_param); 233 } 234 return __mlx4_register_mac(dev, port, mac); 235 } 236 EXPORT_SYMBOL_GPL(mlx4_register_mac); 237 238 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port) 239 { 240 return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + 241 (port - 1) * (1 << dev->caps.log_num_macs); 242 } 243 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn); 244 245 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 246 { 247 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 248 struct mlx4_mac_table *table = &info->mac_table; 249 int index; 250 251 mutex_lock(&table->mutex); 252 index = find_index(dev, table, mac); 253 254 if (validate_index(dev, table, index)) 255 goto out; 256 if (--table->refs[index]) { 257 mlx4_dbg(dev, "Have more references for index %d, no need to modify mac 
table\n", 258 index); 259 goto out; 260 } 261 262 table->entries[index] = 0; 263 mlx4_set_port_mac_table(dev, port, table->entries); 264 --table->total; 265 out: 266 mutex_unlock(&table->mutex); 267 } 268 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); 269 270 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 271 { 272 u64 out_param = 0; 273 274 if (mlx4_is_mfunc(dev)) { 275 if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { 276 (void) mlx4_cmd_imm(dev, mac, &out_param, 277 ((u32) port) << 8 | (u32) RES_MAC, 278 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 279 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 280 } else { 281 /* use old unregister mac format */ 282 set_param_l(&out_param, port); 283 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 284 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 285 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 286 } 287 return; 288 } 289 __mlx4_unregister_mac(dev, port, mac); 290 return; 291 } 292 EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 293 294 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) 295 { 296 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 297 struct mlx4_mac_table *table = &info->mac_table; 298 int index = qpn - info->base_qpn; 299 int err = 0; 300 301 /* CX1 doesn't support multi-functions */ 302 mutex_lock(&table->mutex); 303 304 err = validate_index(dev, table, index); 305 if (err) 306 goto out; 307 308 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); 309 310 err = mlx4_set_port_mac_table(dev, port, table->entries); 311 if (unlikely(err)) { 312 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", 313 (unsigned long long) new_mac); 314 table->entries[index] = 0; 315 } 316 out: 317 mutex_unlock(&table->mutex); 318 return err; 319 } 320 EXPORT_SYMBOL_GPL(__mlx4_replace_mac); 321 322 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 323 __be32 *entries) 324 { 325 struct mlx4_cmd_mailbox *mailbox; 326 u32 in_mod; 327 int err; 328 329 mailbox = mlx4_alloc_cmd_mailbox(dev); 
330 if (IS_ERR(mailbox)) 331 return PTR_ERR(mailbox); 332 333 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); 334 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; 335 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 336 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 337 338 mlx4_free_cmd_mailbox(dev, mailbox); 339 340 return err; 341 } 342 343 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) 344 { 345 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 346 int i; 347 348 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { 349 if (table->refs[i] && 350 (vid == (MLX4_VLAN_MASK & 351 be32_to_cpu(table->entries[i])))) { 352 /* VLAN already registered, increase reference count */ 353 *idx = i; 354 return 0; 355 } 356 } 357 358 return -ENOENT; 359 } 360 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); 361 362 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, 363 int *index) 364 { 365 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 366 int i, err = 0; 367 int free = -1; 368 369 mutex_lock(&table->mutex); 370 371 if (table->total == table->max) { 372 /* No free vlan entries */ 373 err = -ENOSPC; 374 goto out; 375 } 376 377 for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { 378 if (free < 0 && (table->refs[i] == 0)) { 379 free = i; 380 continue; 381 } 382 383 if (table->refs[i] && 384 (vlan == (MLX4_VLAN_MASK & 385 be32_to_cpu(table->entries[i])))) { 386 /* Vlan already registered, increase references count */ 387 *index = i; 388 ++table->refs[i]; 389 goto out; 390 } 391 } 392 393 if (free < 0) { 394 err = -ENOMEM; 395 goto out; 396 } 397 398 /* Register new VLAN */ 399 table->refs[free] = 1; 400 table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); 401 402 err = mlx4_set_port_vlan_table(dev, port, table->entries); 403 if (unlikely(err)) { 404 mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); 405 table->refs[free] = 0; 406 table->entries[free] = 0; 407 goto out; 408 } 409 410 
*index = free; 411 ++table->total; 412 out: 413 mutex_unlock(&table->mutex); 414 return err; 415 } 416 417 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 418 { 419 u64 out_param = 0; 420 int err; 421 422 if (vlan > 4095) 423 return -EINVAL; 424 425 if (mlx4_is_mfunc(dev)) { 426 err = mlx4_cmd_imm(dev, vlan, &out_param, 427 ((u32) port) << 8 | (u32) RES_VLAN, 428 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 429 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 430 if (!err) 431 *index = get_param_l(&out_param); 432 433 return err; 434 } 435 return __mlx4_register_vlan(dev, port, vlan, index); 436 } 437 EXPORT_SYMBOL_GPL(mlx4_register_vlan); 438 439 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) 440 { 441 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 442 int index; 443 444 mutex_lock(&table->mutex); 445 if (mlx4_find_cached_vlan(dev, port, vlan, &index)) { 446 mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); 447 goto out; 448 } 449 450 if (index < MLX4_VLAN_REGULAR) { 451 mlx4_warn(dev, "Trying to free special vlan index %d\n", index); 452 goto out; 453 } 454 455 if (--table->refs[index]) { 456 mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n", 457 table->refs[index], index); 458 goto out; 459 } 460 table->entries[index] = 0; 461 mlx4_set_port_vlan_table(dev, port, table->entries); 462 --table->total; 463 out: 464 mutex_unlock(&table->mutex); 465 } 466 467 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) 468 { 469 u64 out_param = 0; 470 471 if (mlx4_is_mfunc(dev)) { 472 (void) mlx4_cmd_imm(dev, vlan, &out_param, 473 ((u32) port) << 8 | (u32) RES_VLAN, 474 RES_OP_RESERVE_AND_MAP, 475 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 476 MLX4_CMD_WRAPPED); 477 return; 478 } 479 __mlx4_unregister_vlan(dev, port, vlan); 480 } 481 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); 482 483 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) 
484 { 485 struct mlx4_cmd_mailbox *inmailbox, *outmailbox; 486 u8 *inbuf, *outbuf; 487 int err; 488 489 inmailbox = mlx4_alloc_cmd_mailbox(dev); 490 if (IS_ERR(inmailbox)) 491 return PTR_ERR(inmailbox); 492 493 outmailbox = mlx4_alloc_cmd_mailbox(dev); 494 if (IS_ERR(outmailbox)) { 495 mlx4_free_cmd_mailbox(dev, inmailbox); 496 return PTR_ERR(outmailbox); 497 } 498 499 inbuf = inmailbox->buf; 500 outbuf = outmailbox->buf; 501 inbuf[0] = 1; 502 inbuf[1] = 1; 503 inbuf[2] = 1; 504 inbuf[3] = 1; 505 *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015); 506 *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); 507 508 err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, 509 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, 510 MLX4_CMD_NATIVE); 511 if (!err) 512 *caps = *(__be32 *) (outbuf + 84); 513 mlx4_free_cmd_mailbox(dev, inmailbox); 514 mlx4_free_cmd_mailbox(dev, outmailbox); 515 return err; 516 } 517 static struct mlx4_roce_gid_entry zgid_entry; 518 519 int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port) 520 { 521 int vfs; 522 int slave_gid = slave; 523 unsigned i; 524 struct mlx4_slaves_pport slaves_pport; 525 struct mlx4_active_ports actv_ports; 526 unsigned max_port_p_one; 527 528 if (slave == 0) 529 return MLX4_ROCE_PF_GIDS; 530 531 /* Slave is a VF */ 532 slaves_pport = mlx4_phys_to_slaves_pport(dev, port); 533 actv_ports = mlx4_get_active_ports(dev, slave); 534 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + 535 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; 536 537 for (i = 1; i < max_port_p_one; i++) { 538 struct mlx4_active_ports exclusive_ports; 539 struct mlx4_slaves_pport slaves_pport_actv; 540 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 541 set_bit(i - 1, exclusive_ports.ports); 542 if (i == port) 543 continue; 544 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( 545 dev, &exclusive_ports); 546 slave_gid -= bitmap_weight(slaves_pport_actv.slaves, 547 dev->num_vfs + 1); 548 } 549 vfs = 
bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 550 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) 551 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; 552 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; 553 } 554 555 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port) 556 { 557 int gids; 558 unsigned i; 559 int slave_gid = slave; 560 int vfs; 561 562 struct mlx4_slaves_pport slaves_pport; 563 struct mlx4_active_ports actv_ports; 564 unsigned max_port_p_one; 565 566 if (slave == 0) 567 return 0; 568 569 slaves_pport = mlx4_phys_to_slaves_pport(dev, port); 570 actv_ports = mlx4_get_active_ports(dev, slave); 571 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + 572 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; 573 574 for (i = 1; i < max_port_p_one; i++) { 575 struct mlx4_active_ports exclusive_ports; 576 struct mlx4_slaves_pport slaves_pport_actv; 577 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 578 set_bit(i - 1, exclusive_ports.ports); 579 if (i == port) 580 continue; 581 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( 582 dev, &exclusive_ports); 583 slave_gid -= bitmap_weight(slaves_pport_actv.slaves, 584 dev->num_vfs + 1); 585 } 586 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; 587 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 588 if (slave_gid <= gids % vfs) 589 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); 590 591 return MLX4_ROCE_PF_GIDS + (gids % vfs) + 592 ((gids / vfs) * (slave_gid - 1)); 593 } 594 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); 595 596 static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, 597 int port, struct mlx4_cmd_mailbox *mailbox) 598 { 599 struct mlx4_roce_gid_entry *gid_entry_mbox; 600 struct mlx4_priv *priv = mlx4_priv(dev); 601 int num_gids, base, offset; 602 int i, err; 603 604 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 605 base = mlx4_get_base_gid_ix(dev, 
slave, port); 606 607 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); 608 609 mutex_lock(&(priv->port[port].gid_table.mutex)); 610 /* Zero-out gids belonging to that slave in the port GID table */ 611 for (i = 0, offset = base; i < num_gids; offset++, i++) 612 memcpy(priv->port[port].gid_table.roce_gids[offset].raw, 613 zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE); 614 615 /* Now, copy roce port gids table to mailbox for passing to FW */ 616 gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; 617 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) 618 memcpy(gid_entry_mbox->raw, 619 priv->port[port].gid_table.roce_gids[i].raw, 620 MLX4_ROCE_GID_ENTRY_SIZE); 621 622 err = mlx4_cmd(dev, mailbox->dma, 623 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1, 624 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 625 MLX4_CMD_NATIVE); 626 mutex_unlock(&(priv->port[port].gid_table.mutex)); 627 return err; 628 } 629 630 631 void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) 632 { 633 struct mlx4_active_ports actv_ports; 634 struct mlx4_cmd_mailbox *mailbox; 635 int num_eth_ports, err; 636 int i; 637 638 if (slave < 0 || slave > dev->num_vfs) 639 return; 640 641 actv_ports = mlx4_get_active_ports(dev, slave); 642 643 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { 644 if (test_bit(i, actv_ports.ports)) { 645 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) 646 continue; 647 num_eth_ports++; 648 } 649 } 650 651 if (!num_eth_ports) 652 return; 653 654 /* have ETH ports. 
Alloc mailbox for SET_PORT command */ 655 mailbox = mlx4_alloc_cmd_mailbox(dev); 656 if (IS_ERR(mailbox)) 657 return; 658 659 for (i = 0; i < dev->caps.num_ports; i++) { 660 if (test_bit(i, actv_ports.ports)) { 661 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) 662 continue; 663 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); 664 if (err) 665 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n", 666 slave, i + 1, err); 667 } 668 } 669 670 mlx4_free_cmd_mailbox(dev, mailbox); 671 return; 672 } 673 674 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 675 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 676 { 677 struct mlx4_priv *priv = mlx4_priv(dev); 678 struct mlx4_port_info *port_info; 679 struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; 680 struct mlx4_slave_state *slave_st = &master->slave_state[slave]; 681 struct mlx4_set_port_rqp_calc_context *qpn_context; 682 struct mlx4_set_port_general_context *gen_context; 683 struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; 684 int reset_qkey_viols; 685 int port; 686 int is_eth; 687 int num_gids; 688 int base; 689 u32 in_modifier; 690 u32 promisc; 691 u16 mtu, prev_mtu; 692 int err; 693 int i, j; 694 int offset; 695 __be32 agg_cap_mask; 696 __be32 slave_cap_mask; 697 __be32 new_cap_mask; 698 699 port = in_mod & 0xff; 700 in_modifier = in_mod >> 8; 701 is_eth = op_mod; 702 port_info = &priv->port[port]; 703 704 /* Slaves cannot perform SET_PORT operations except changing MTU */ 705 if (is_eth) { 706 if (slave != dev->caps.function && 707 in_modifier != MLX4_SET_PORT_GENERAL && 708 in_modifier != MLX4_SET_PORT_GID_TABLE) { 709 mlx4_warn(dev, "denying SET_PORT for slave:%d\n", 710 slave); 711 return -EINVAL; 712 } 713 switch (in_modifier) { 714 case MLX4_SET_PORT_RQP_CALC: 715 qpn_context = inbox->buf; 716 qpn_context->base_qpn = 717 cpu_to_be32(port_info->base_qpn); 718 qpn_context->n_mac = 0x7; 719 promisc = 
be32_to_cpu(qpn_context->promisc) >> 720 SET_PORT_PROMISC_SHIFT; 721 qpn_context->promisc = cpu_to_be32( 722 promisc << SET_PORT_PROMISC_SHIFT | 723 port_info->base_qpn); 724 promisc = be32_to_cpu(qpn_context->mcast) >> 725 SET_PORT_MC_PROMISC_SHIFT; 726 qpn_context->mcast = cpu_to_be32( 727 promisc << SET_PORT_MC_PROMISC_SHIFT | 728 port_info->base_qpn); 729 break; 730 case MLX4_SET_PORT_GENERAL: 731 gen_context = inbox->buf; 732 /* Mtu is configured as the max MTU among all the 733 * the functions on the port. */ 734 mtu = be16_to_cpu(gen_context->mtu); 735 mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + 736 ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); 737 prev_mtu = slave_st->mtu[port]; 738 slave_st->mtu[port] = mtu; 739 if (mtu > master->max_mtu[port]) 740 master->max_mtu[port] = mtu; 741 if (mtu < prev_mtu && prev_mtu == 742 master->max_mtu[port]) { 743 slave_st->mtu[port] = mtu; 744 master->max_mtu[port] = mtu; 745 for (i = 0; i < dev->num_slaves; i++) { 746 master->max_mtu[port] = 747 max(master->max_mtu[port], 748 master->slave_state[i].mtu[port]); 749 } 750 } 751 752 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 753 break; 754 case MLX4_SET_PORT_GID_TABLE: 755 /* change to MULTIPLE entries: number of guest's gids 756 * need a FOR-loop here over number of gids the guest has. 757 * 1. 
Check no duplicates in gids passed by slave 758 */ 759 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 760 base = mlx4_get_base_gid_ix(dev, slave, port); 761 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 762 for (i = 0; i < num_gids; gid_entry_mbox++, i++) { 763 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, 764 sizeof(zgid_entry))) 765 continue; 766 gid_entry_mb1 = gid_entry_mbox + 1; 767 for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) { 768 if (!memcmp(gid_entry_mb1->raw, 769 zgid_entry.raw, sizeof(zgid_entry))) 770 continue; 771 if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw, 772 sizeof(gid_entry_mbox->raw))) { 773 /* found duplicate */ 774 return -EINVAL; 775 } 776 } 777 } 778 779 /* 2. Check that do not have duplicates in OTHER 780 * entries in the port GID table 781 */ 782 783 mutex_lock(&(priv->port[port].gid_table.mutex)); 784 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 785 if (i >= base && i < base + num_gids) 786 continue; /* don't compare to slave's current gids */ 787 gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; 788 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) 789 continue; 790 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 791 for (j = 0; j < num_gids; gid_entry_mbox++, j++) { 792 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, 793 sizeof(zgid_entry))) 794 continue; 795 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, 796 sizeof(gid_entry_tbl->raw))) { 797 /* found duplicate */ 798 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n", 799 slave, i); 800 mutex_unlock(&(priv->port[port].gid_table.mutex)); 801 return -EINVAL; 802 } 803 } 804 } 805 806 /* insert slave GIDs with memcpy, starting at slave's base index */ 807 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 808 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) 809 memcpy(priv->port[port].gid_table.roce_gids[offset].raw, 810 
gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE); 811 812 /* Now, copy roce port gids table to current mailbox for passing to FW */ 813 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 814 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) 815 memcpy(gid_entry_mbox->raw, 816 priv->port[port].gid_table.roce_gids[i].raw, 817 MLX4_ROCE_GID_ENTRY_SIZE); 818 819 err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, 820 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 821 MLX4_CMD_NATIVE); 822 mutex_unlock(&(priv->port[port].gid_table.mutex)); 823 return err; 824 } 825 826 return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, 827 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 828 MLX4_CMD_NATIVE); 829 } 830 831 /* For IB, we only consider: 832 * - The capability mask, which is set to the aggregate of all 833 * slave function capabilities 834 * - The QKey violatin counter - reset according to each request. 835 */ 836 837 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 838 reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40; 839 new_cap_mask = ((__be32 *) inbox->buf)[2]; 840 } else { 841 reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1; 842 new_cap_mask = ((__be32 *) inbox->buf)[1]; 843 } 844 845 /* slave may not set the IS_SM capability for the port */ 846 if (slave != mlx4_master_func_num(dev) && 847 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM)) 848 return -EINVAL; 849 850 /* No DEV_MGMT in multifunc mode */ 851 if (mlx4_is_mfunc(dev) && 852 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP)) 853 return -EINVAL; 854 855 agg_cap_mask = 0; 856 slave_cap_mask = 857 priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; 858 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; 859 for (i = 0; i < dev->num_slaves; i++) 860 agg_cap_mask |= 861 priv->mfunc.master.slave_state[i].ib_cap_mask[port]; 862 863 /* only clear mailbox for guests. 
Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		/* Old command layout: qkey-violation reset flag in bit 6 of
		 * byte 0, aggregated capability mask in dword 2.
		 */
		*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		/* New layout: reset flag in bit 0 of byte 3, capability
		 * mask in dword 1.
		 */
		((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	/* On failure, roll the recorded slave IB cap mask back to the value
	 * saved before this attempt.
	 */
	if (err)
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}

/* VHCR wrapper for SET_PORT issued by a slave: translate the slave's port
 * number (low byte of in_modifier) to the physical port, then forward to
 * the common handler.
 */
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int port = mlx4_slave_convert_port(
			dev, slave, vhcr->in_modifier & 0xFF);

	if (port < 0)
		return -EINVAL;

	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
			    (port & 0xFF);

	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
				    vhcr->op_modifier, inbox);
}

/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4,  /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};

/* Initialize an IB port: program the default capability mask, optionally the
 * PKEY table size (master only, when pkey_tbl_sz >= 0), the port MTU cap and
 * the VL cap.  The VL cap is tried at 8 and halved down to 1 as long as the
 * firmware keeps answering -ENOMEM.  No-op (returns 0) for Ethernet ports.
 */
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP) |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/* Program general port parameters: MTU, global pause (pptx/pprx) and
 * per-priority flow control masks (pfctx/pfcrx).  Note the pause enable
 * bits are forced off whenever the corresponding PFC mask is non-zero
 * (pptx * !pfctx) — global pause and PFC are mutually exclusive here.
 */
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	context->pptx = (pptx * (!pfctx)) << 7;	/* pause TX bit 7, off if PFC */
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;	/* pause RX bit 7, off if PFC */
	context->pfcrx = pfcrx;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);

/* Program the RQP calculation context (base QPN, unicast/multicast promisc
 * modes, default VLAN indices).  Only meaningful in A0 steering mode; in any
 * other steering mode this returns 0 without touching the firmware.
 */
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
						MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);

/* Program the user-priority to traffic-class map.  Two 4-bit priorities are
 * packed per context byte (even priority in the high nibble).
 */
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_prio2tc_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	for (i = 0; i < MLX4_NUM_UP; i += 2)
		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);

/* Program the per-traffic-class scheduler: priority group, bandwidth
 * percentage and rate limit for each TC.  A NULL ratelimit array, or a
 * zero entry, falls back to MLX4_RATELIMIT_DEFAULT.
 */
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
			    u8 *pg, u16 *ratelimit)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_scheduler_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;

	for (i = 0; i < MLX4_NUM_TC; i++) {
		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
		u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
			MLX4_RATELIMIT_DEFAULT;

		tc->pg = htons(pg[i]);
		tc->bw_precentage = htons(tc_tx_bw[i]);

		tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
		tc->max_bw_value = htons(r);
	}

	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);

/* Modify-flag and enable-flag bit positions for the VXLAN SET_PORT
 * sub-command below.
 */
enum {
	VXLAN_ENABLE_MODIFY	= 1 << 7,
	VXLAN_STEERING_MODIFY	= 1 << 6,

	VXLAN_ENABLE		= 1 << 7,
};

/* Mailbox layout for the MLX4_SET_PORT_VXLAN sub-command. */
struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;
	u8	reserved2;
	u8	enable_flags;
	u8	steering;
};

/* Enable/disable VXLAN offload on a port and set its steering mode. */
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
{
	int err;
	u32 in_mod;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_vxlan_context  *context;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof(*context));

	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
	if (enable)
		context->enable_flags = VXLAN_ENABLE;
	context->steering  = steering;

	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);

/* VHCR wrapper for SET_MCAST_FLTR from a slave: intentionally a no-op that
 * reports success (slaves are not allowed to touch the multicast filter).
 */
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}

/* Issue SET_MCAST_FLTR: the MAC goes in the low bits of the immediate
 * parameter, the "clear" flag in bit 63, and the filter mode in the
 * op modifier.
 */
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);

/* VHCR wrapper for SET_VLAN_FLTR from a slave: no-op, reports success
 * (same policy as the multicast filter wrapper above).
 */
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}

/* Fetch Ethernet statistics from the firmware into the given outbox. */
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}

/* VHCR wrapper for DUMP_ETH_STATS: only the PF (dev->caps.function) is
 * served; other slaves get an empty success.
 */
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	if (slave != dev->caps.function)
		return 0;
	return mlx4_common_dump_eth_stats(dev, slave,
					  vhcr->in_modifier, outbox);
}

/* Build the statistics mask for this function: empty for non-multi-function
 * devices, traffic/drops/port counters for slaves, plus error counters on
 * the master.
 */
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
{
	if (!mlx4_is_mfunc(dev)) {
		*stats_bitmap = 0;
		return;
	}

	*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
			 MLX4_STATS_TRAFFIC_DROPS_MASK |
			 MLX4_STATS_PORT_COUNTERS_MASK);

	if (mlx4_is_master(dev))
		*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);

/* Map a RoCE GID to the slave that owns it.  First looks the GID up in the
 * port's GID table, then converts the table index to a per-port slave
 * number and finally to a globally unique slave id.  Returns 0 and sets
 * *slave_id on success, -EINVAL if the GID is not found (or not mfunc).
 *
 * NOTE(review): when found_ix >= MLX4_ROCE_PF_GIDS and num_vfs is 0, the
 * divisions by num_vfs below would fault; presumably a non-PF GID cannot
 * be present without active VFs — confirm.
 */
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
				 int *slave_id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, found_ix = -1;
	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	struct mlx4_slaves_pport slaves_pport;
	unsigned num_vfs;
	int slave_gid;

	if (!mlx4_is_mfunc(dev))
		return -EINVAL;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;

	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
			    MLX4_ROCE_GID_ENTRY_SIZE)) {
			found_ix = i;
			break;
		}
	}

	if (found_ix >= 0) {
		/* Calculate a slave_gid which is the slave number in the gid
		 * table and not a globally unique slave number.
		 */
		if (found_ix < MLX4_ROCE_PF_GIDS)
			slave_gid = 0;
		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
			 (vf_gids / num_vfs + 1))
			/* First (vf_gids % num_vfs) VFs get one extra GID
			 * each when vf_gids doesn't divide evenly.
			 */
			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
				     (vf_gids / num_vfs + 1)) + 1;
		else
			slave_gid =
			((found_ix - MLX4_ROCE_PF_GIDS -
			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;

		/* Calculate the globally unique slave id */
		if (slave_gid) {
			struct mlx4_active_ports exclusive_ports;
			struct mlx4_active_ports actv_ports;
			struct mlx4_slaves_pport slaves_pport_actv;
			unsigned max_port_p_one;
			int num_vfs_before = 0;
			int candidate_slave_gid;

			/* Calculate how many VFs are on the previous port, if exists */
			for (i = 1; i < port; i++) {
				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
							dev, &exclusive_ports);
				num_vfs_before += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->num_vfs + 1);
			}

			/* candidate_slave_gid isn't necessarily the correct slave, but
			 * it has the same number of ports and is assigned to the same
			 * ports as the real slave we're looking for. On dual port VF,
			 * slave_gid = [single port VFs on port <port>] +
			 * [offset of the current slave from the first dual port VF] +
			 * 1 (for the PF).
			 */
			candidate_slave_gid = slave_gid + num_vfs_before;

			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
			max_port_p_one = find_first_bit(
				actv_ports.ports, dev->caps.num_ports) +
				bitmap_weight(actv_ports.ports,
					      dev->caps.num_ports) + 1;

			/* Calculate the real slave number */
			for (i = 1; i < max_port_p_one; i++) {
				if (i == port)
					continue;
				bitmap_zero(exclusive_ports.ports,
					    dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
						dev, &exclusive_ports);
				slave_gid += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->num_vfs + 1);
			}
		}
		*slave_id = slave_gid;
	}

	return (found_ix >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);

/* Copy the RoCE GID at table index slave_id on the given port into *gid.
 * Master only.  NOTE(review): slave_id is not range-checked against
 * MLX4_ROCE_MAX_GIDS here — callers are assumed to pass a valid index;
 * confirm at call sites.
 */
int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
				 u8 *gid)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev))
		return -EINVAL;

	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
	       MLX4_ROCE_GID_ENTRY_SIZE);
	return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);