1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

/* Bit 63 set in a MAC table entry marks the entry as valid to the firmware. */
#define MLX4_MAC_VALID		(1ull << 63)

/* Bit 31 marks a VLAN table entry as valid; the 12-bit VLAN id occupies the
 * low bits of the entry.
 */
#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

/* Counter-group selection masks used to build the stats bitmap reported by
 * the DUMP_ETH_STATS firmware command (see mlx4_set_stats_bitmap()).
 */
#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL

/* Reset a per-port MAC table: clear every entry and reference count, and
 * derive the usable table size from the device capabilities.
 */
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

/* Reset a per-port VLAN table. The first MLX4_VLAN_REGULAR indices are
 * reserved (see __mlx4_register_vlan/__mlx4_unregister_vlan), so they are
 * excluded from the usable capacity.
 */
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

/* Attach a unicast steering rule directing @mac on @port to QP *@qpn.
 * In device-managed flow-steering mode the resulting rule handle is
 * returned through @reg_id (needed later for detach); in B0 mode the
 * MAC/port pair is encoded into a GID and attached directly.
 * Returns 0 on success, -EINVAL for an unknown steering mode, or the
 * error from the attach call.
 */
static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
			     u64 mac, int *qpn, u64 *reg_id)
{
	__be64 be_mac;
	int err;

	mac &= MLX4_MAC_MASK;
	/* MAC occupies the high 6 bytes of the big-endian u64 (shift by 16) */
	be_mac = cpu_to_be64(mac << 16);

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], &be_mac, ETH_ALEN);
		gid[5] = port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		/* Match all 48 MAC bits */
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		mlx4_warn(dev, "Failed Attaching Unicast\n");

	return err;
}

/* Undo mlx4_uc_steer_add(): detach the unicast rule for @mac/@port from
 * QP @qpn. @reg_id is the handle returned by the add in device-managed
 * mode and is ignored in B0 mode.
 */
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
				  u64 mac, int qpn, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};
		__be64 be_mac;

		qp.qpn = qpn;
		mac &= MLX4_MAC_MASK;
		be_mac = cpu_to_be64(mac << 16);
		memcpy(&gid[10], &be_mac, ETH_ALEN);
		gid[5] = port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		mlx4_err(dev, "Invalid steering mode.\n");
	}
}

/* Check that @index refers to an in-range, populated MAC table entry.
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid Mac entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

/* Linear search of the MAC table for @mac (valid bit ignored via the mask).
 * Returns the entry index, or -EINVAL if the MAC is not present.
 * NOTE(review): caller is expected to hold table->mutex for a stable
 * result — __mlx4_unregister_mac calls this just before taking it; confirm.
 */
static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* Mac not found */
	return -EINVAL;
}

/* Register @mac on @port and provide a QP number for its receive traffic.
 * In A0 steering mode the QP number is derived directly from the MAC table
 * index; otherwise a QP range is reserved, a unicast steering rule is
 * attached, and the (qpn -> mac entry) mapping is recorded in the port's
 * radix tree. Returns 0 on success, negative errno on failure; all partial
 * work is rolled back via the goto-cleanup chain.
 */
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
		 (unsigned long long) mac);
	index = mlx4_register_mac(dev, port, mac);
	if (index < 0) {
		err = index;
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		*qpn = info->base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
	if (err) {
		mlx4_err(dev, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	entry->mac = mac;
	entry->reg_id = reg_id;
	err = radix_tree_insert(&info->mac_tree, *qpn, entry);
	if (err)
		goto insert_err;
	return 0;

insert_err:
	kfree(entry);

alloc_err:
	mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, port, mac);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);

/* Reverse of mlx4_get_eth_qp(): unregister @mac and, when not in A0 mode,
 * release the steering rule, QP range, radix-tree mapping and entry that
 * were set up for @qpn.
 */
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
		 (unsigned long long) mac);
	mlx4_unregister_mac(dev, port, mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (entry) {
			mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
				 " qpn %d\n", port,
				 (unsigned long long) mac, qpn);
			mlx4_uc_steer_release(dev, port, entry->mac,
					      qpn, entry->reg_id);
			mlx4_qp_release_range(dev, qpn, 1);
			radix_tree_delete(&info->mac_tree, qpn);
			kfree(entry);
		}
	}
}
EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
269 270 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, 271 __be64 *entries) 272 { 273 struct mlx4_cmd_mailbox *mailbox; 274 u32 in_mod; 275 int err; 276 277 mailbox = mlx4_alloc_cmd_mailbox(dev); 278 if (IS_ERR(mailbox)) 279 return PTR_ERR(mailbox); 280 281 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); 282 283 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; 284 285 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 286 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 287 288 mlx4_free_cmd_mailbox(dev, mailbox); 289 return err; 290 } 291 292 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 293 { 294 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 295 struct mlx4_mac_table *table = &info->mac_table; 296 int i, err = 0; 297 int free = -1; 298 299 mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", 300 (unsigned long long) mac, port); 301 302 mutex_lock(&table->mutex); 303 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { 304 if (free < 0 && !table->entries[i]) { 305 free = i; 306 continue; 307 } 308 309 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 310 /* MAC already registered, Must not have duplicates */ 311 err = -EEXIST; 312 goto out; 313 } 314 } 315 316 mlx4_dbg(dev, "Free MAC index is %d\n", free); 317 318 if (table->total == table->max) { 319 /* No free mac entries */ 320 err = -ENOSPC; 321 goto out; 322 } 323 324 /* Register new MAC */ 325 table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); 326 327 err = mlx4_set_port_mac_table(dev, port, table->entries); 328 if (unlikely(err)) { 329 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", 330 (unsigned long long) mac); 331 table->entries[free] = 0; 332 goto out; 333 } 334 335 err = free; 336 ++table->total; 337 out: 338 mutex_unlock(&table->mutex); 339 return err; 340 } 341 EXPORT_SYMBOL_GPL(__mlx4_register_mac); 342 343 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 344 { 345 u64 out_param; 346 int err; 347 348 if (mlx4_is_mfunc(dev)) 
{ 349 set_param_l(&out_param, port); 350 err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 351 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 352 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 353 if (err) 354 return err; 355 356 return get_param_l(&out_param); 357 } 358 return __mlx4_register_mac(dev, port, mac); 359 } 360 EXPORT_SYMBOL_GPL(mlx4_register_mac); 361 362 363 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 364 { 365 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 366 struct mlx4_mac_table *table = &info->mac_table; 367 int index; 368 369 index = find_index(dev, table, mac); 370 371 mutex_lock(&table->mutex); 372 373 if (validate_index(dev, table, index)) 374 goto out; 375 376 table->entries[index] = 0; 377 mlx4_set_port_mac_table(dev, port, table->entries); 378 --table->total; 379 out: 380 mutex_unlock(&table->mutex); 381 } 382 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); 383 384 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 385 { 386 u64 out_param; 387 388 if (mlx4_is_mfunc(dev)) { 389 set_param_l(&out_param, port); 390 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 391 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 392 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 393 return; 394 } 395 __mlx4_unregister_mac(dev, port, mac); 396 return; 397 } 398 EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 399 400 int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) 401 { 402 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 403 struct mlx4_mac_table *table = &info->mac_table; 404 struct mlx4_mac_entry *entry; 405 int index = qpn - info->base_qpn; 406 int err = 0; 407 408 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 409 entry = radix_tree_lookup(&info->mac_tree, qpn); 410 if (!entry) 411 return -EINVAL; 412 mlx4_uc_steer_release(dev, port, entry->mac, 413 qpn, entry->reg_id); 414 mlx4_unregister_mac(dev, port, entry->mac); 415 entry->mac = new_mac; 416 entry->reg_id = 0; 417 mlx4_register_mac(dev, 
port, new_mac); 418 err = mlx4_uc_steer_add(dev, port, entry->mac, 419 &qpn, &entry->reg_id); 420 return err; 421 } 422 423 /* CX1 doesn't support multi-functions */ 424 mutex_lock(&table->mutex); 425 426 err = validate_index(dev, table, index); 427 if (err) 428 goto out; 429 430 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); 431 432 err = mlx4_set_port_mac_table(dev, port, table->entries); 433 if (unlikely(err)) { 434 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", 435 (unsigned long long) new_mac); 436 table->entries[index] = 0; 437 } 438 out: 439 mutex_unlock(&table->mutex); 440 return err; 441 } 442 EXPORT_SYMBOL_GPL(mlx4_replace_mac); 443 444 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 445 __be32 *entries) 446 { 447 struct mlx4_cmd_mailbox *mailbox; 448 u32 in_mod; 449 int err; 450 451 mailbox = mlx4_alloc_cmd_mailbox(dev); 452 if (IS_ERR(mailbox)) 453 return PTR_ERR(mailbox); 454 455 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); 456 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; 457 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 458 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 459 460 mlx4_free_cmd_mailbox(dev, mailbox); 461 462 return err; 463 } 464 465 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) 466 { 467 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 468 int i; 469 470 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { 471 if (table->refs[i] && 472 (vid == (MLX4_VLAN_MASK & 473 be32_to_cpu(table->entries[i])))) { 474 /* VLAN already registered, increase reference count */ 475 *idx = i; 476 return 0; 477 } 478 } 479 480 return -ENOENT; 481 } 482 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); 483 484 static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, 485 int *index) 486 { 487 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 488 int i, err = 0; 489 int free = -1; 490 491 mutex_lock(&table->mutex); 492 493 if 
(table->total == table->max) { 494 /* No free vlan entries */ 495 err = -ENOSPC; 496 goto out; 497 } 498 499 for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { 500 if (free < 0 && (table->refs[i] == 0)) { 501 free = i; 502 continue; 503 } 504 505 if (table->refs[i] && 506 (vlan == (MLX4_VLAN_MASK & 507 be32_to_cpu(table->entries[i])))) { 508 /* Vlan already registered, increase references count */ 509 *index = i; 510 ++table->refs[i]; 511 goto out; 512 } 513 } 514 515 if (free < 0) { 516 err = -ENOMEM; 517 goto out; 518 } 519 520 /* Register new VLAN */ 521 table->refs[free] = 1; 522 table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); 523 524 err = mlx4_set_port_vlan_table(dev, port, table->entries); 525 if (unlikely(err)) { 526 mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); 527 table->refs[free] = 0; 528 table->entries[free] = 0; 529 goto out; 530 } 531 532 *index = free; 533 ++table->total; 534 out: 535 mutex_unlock(&table->mutex); 536 return err; 537 } 538 539 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 540 { 541 u64 out_param; 542 int err; 543 544 if (mlx4_is_mfunc(dev)) { 545 set_param_l(&out_param, port); 546 err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN, 547 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 548 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 549 if (!err) 550 *index = get_param_l(&out_param); 551 552 return err; 553 } 554 return __mlx4_register_vlan(dev, port, vlan, index); 555 } 556 EXPORT_SYMBOL_GPL(mlx4_register_vlan); 557 558 static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 559 { 560 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 561 562 if (index < MLX4_VLAN_REGULAR) { 563 mlx4_warn(dev, "Trying to free special vlan index %d\n", index); 564 return; 565 } 566 567 mutex_lock(&table->mutex); 568 if (!table->refs[index]) { 569 mlx4_warn(dev, "No vlan entry for index %d\n", index); 570 goto out; 571 } 572 if (--table->refs[index]) { 573 
mlx4_dbg(dev, "Have more references for index %d," 574 "no need to modify vlan table\n", index); 575 goto out; 576 } 577 table->entries[index] = 0; 578 mlx4_set_port_vlan_table(dev, port, table->entries); 579 --table->total; 580 out: 581 mutex_unlock(&table->mutex); 582 } 583 584 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 585 { 586 u64 in_param; 587 int err; 588 589 if (mlx4_is_mfunc(dev)) { 590 set_param_l(&in_param, port); 591 err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP, 592 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 593 MLX4_CMD_WRAPPED); 594 if (!err) 595 mlx4_warn(dev, "Failed freeing vlan at index:%d\n", 596 index); 597 598 return; 599 } 600 __mlx4_unregister_vlan(dev, port, index); 601 } 602 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); 603 604 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) 605 { 606 struct mlx4_cmd_mailbox *inmailbox, *outmailbox; 607 u8 *inbuf, *outbuf; 608 int err; 609 610 inmailbox = mlx4_alloc_cmd_mailbox(dev); 611 if (IS_ERR(inmailbox)) 612 return PTR_ERR(inmailbox); 613 614 outmailbox = mlx4_alloc_cmd_mailbox(dev); 615 if (IS_ERR(outmailbox)) { 616 mlx4_free_cmd_mailbox(dev, inmailbox); 617 return PTR_ERR(outmailbox); 618 } 619 620 inbuf = inmailbox->buf; 621 outbuf = outmailbox->buf; 622 memset(inbuf, 0, 256); 623 memset(outbuf, 0, 256); 624 inbuf[0] = 1; 625 inbuf[1] = 1; 626 inbuf[2] = 1; 627 inbuf[3] = 1; 628 *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015); 629 *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); 630 631 err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, 632 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, 633 MLX4_CMD_NATIVE); 634 if (!err) 635 *caps = *(__be32 *) (outbuf + 84); 636 mlx4_free_cmd_mailbox(dev, inmailbox); 637 mlx4_free_cmd_mailbox(dev, outmailbox); 638 return err; 639 } 640 641 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 642 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 643 { 644 struct mlx4_priv *priv = 
mlx4_priv(dev); 645 struct mlx4_port_info *port_info; 646 struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; 647 struct mlx4_slave_state *slave_st = &master->slave_state[slave]; 648 struct mlx4_set_port_rqp_calc_context *qpn_context; 649 struct mlx4_set_port_general_context *gen_context; 650 int reset_qkey_viols; 651 int port; 652 int is_eth; 653 u32 in_modifier; 654 u32 promisc; 655 u16 mtu, prev_mtu; 656 int err; 657 int i; 658 __be32 agg_cap_mask; 659 __be32 slave_cap_mask; 660 __be32 new_cap_mask; 661 662 port = in_mod & 0xff; 663 in_modifier = in_mod >> 8; 664 is_eth = op_mod; 665 port_info = &priv->port[port]; 666 667 /* Slaves cannot perform SET_PORT operations except changing MTU */ 668 if (is_eth) { 669 if (slave != dev->caps.function && 670 in_modifier != MLX4_SET_PORT_GENERAL) { 671 mlx4_warn(dev, "denying SET_PORT for slave:%d\n", 672 slave); 673 return -EINVAL; 674 } 675 switch (in_modifier) { 676 case MLX4_SET_PORT_RQP_CALC: 677 qpn_context = inbox->buf; 678 qpn_context->base_qpn = 679 cpu_to_be32(port_info->base_qpn); 680 qpn_context->n_mac = 0x7; 681 promisc = be32_to_cpu(qpn_context->promisc) >> 682 SET_PORT_PROMISC_SHIFT; 683 qpn_context->promisc = cpu_to_be32( 684 promisc << SET_PORT_PROMISC_SHIFT | 685 port_info->base_qpn); 686 promisc = be32_to_cpu(qpn_context->mcast) >> 687 SET_PORT_MC_PROMISC_SHIFT; 688 qpn_context->mcast = cpu_to_be32( 689 promisc << SET_PORT_MC_PROMISC_SHIFT | 690 port_info->base_qpn); 691 break; 692 case MLX4_SET_PORT_GENERAL: 693 gen_context = inbox->buf; 694 /* Mtu is configured as the max MTU among all the 695 * the functions on the port. 
*/ 696 mtu = be16_to_cpu(gen_context->mtu); 697 mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]); 698 prev_mtu = slave_st->mtu[port]; 699 slave_st->mtu[port] = mtu; 700 if (mtu > master->max_mtu[port]) 701 master->max_mtu[port] = mtu; 702 if (mtu < prev_mtu && prev_mtu == 703 master->max_mtu[port]) { 704 slave_st->mtu[port] = mtu; 705 master->max_mtu[port] = mtu; 706 for (i = 0; i < dev->num_slaves; i++) { 707 master->max_mtu[port] = 708 max(master->max_mtu[port], 709 master->slave_state[i].mtu[port]); 710 } 711 } 712 713 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 714 break; 715 } 716 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, 717 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 718 MLX4_CMD_NATIVE); 719 } 720 721 /* For IB, we only consider: 722 * - The capability mask, which is set to the aggregate of all 723 * slave function capabilities 724 * - The QKey violatin counter - reset according to each request. 725 */ 726 727 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 728 reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40; 729 new_cap_mask = ((__be32 *) inbox->buf)[2]; 730 } else { 731 reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1; 732 new_cap_mask = ((__be32 *) inbox->buf)[1]; 733 } 734 735 agg_cap_mask = 0; 736 slave_cap_mask = 737 priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; 738 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; 739 for (i = 0; i < dev->num_slaves; i++) 740 agg_cap_mask |= 741 priv->mfunc.master.slave_state[i].ib_cap_mask[port]; 742 743 /* only clear mailbox for guests. 
Master may be setting 744 * MTU or PKEY table size 745 */ 746 if (slave != dev->caps.function) 747 memset(inbox->buf, 0, 256); 748 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 749 *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; 750 ((__be32 *) inbox->buf)[2] = agg_cap_mask; 751 } else { 752 ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; 753 ((__be32 *) inbox->buf)[1] = agg_cap_mask; 754 } 755 756 err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT, 757 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 758 if (err) 759 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = 760 slave_cap_mask; 761 return err; 762 } 763 764 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, 765 struct mlx4_vhcr *vhcr, 766 struct mlx4_cmd_mailbox *inbox, 767 struct mlx4_cmd_mailbox *outbox, 768 struct mlx4_cmd_info *cmd) 769 { 770 return mlx4_common_set_port(dev, slave, vhcr->in_modifier, 771 vhcr->op_modifier, inbox); 772 } 773 774 /* bit locations for set port command with zero op modifier */ 775 enum { 776 MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ 777 MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ 778 MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, 779 MLX4_CHANGE_PORT_VL_CAP = 21, 780 MLX4_CHANGE_PORT_MTU_CAP = 22, 781 }; 782 783 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) 784 { 785 struct mlx4_cmd_mailbox *mailbox; 786 int err, vl_cap, pkey_tbl_flag = 0; 787 788 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 789 return 0; 790 791 mailbox = mlx4_alloc_cmd_mailbox(dev); 792 if (IS_ERR(mailbox)) 793 return PTR_ERR(mailbox); 794 795 memset(mailbox->buf, 0, 256); 796 797 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; 798 799 if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { 800 pkey_tbl_flag = 1; 801 ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz); 802 } 803 804 /* IB VL CAP enum isn't used by the firmware, just numerical values */ 805 for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { 806 ((__be32 *) mailbox->buf)[0] = cpu_to_be32( 807 (1 << 
MLX4_CHANGE_PORT_MTU_CAP) | 808 (1 << MLX4_CHANGE_PORT_VL_CAP) | 809 (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | 810 (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | 811 (vl_cap << MLX4_SET_PORT_VL_CAP)); 812 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, 813 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 814 if (err != -ENOMEM) 815 break; 816 } 817 818 mlx4_free_cmd_mailbox(dev, mailbox); 819 return err; 820 } 821 822 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, 823 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) 824 { 825 struct mlx4_cmd_mailbox *mailbox; 826 struct mlx4_set_port_general_context *context; 827 int err; 828 u32 in_mod; 829 830 mailbox = mlx4_alloc_cmd_mailbox(dev); 831 if (IS_ERR(mailbox)) 832 return PTR_ERR(mailbox); 833 context = mailbox->buf; 834 memset(context, 0, sizeof *context); 835 836 context->flags = SET_PORT_GEN_ALL_VALID; 837 context->mtu = cpu_to_be16(mtu); 838 context->pptx = (pptx * (!pfctx)) << 7; 839 context->pfctx = pfctx; 840 context->pprx = (pprx * (!pfcrx)) << 7; 841 context->pfcrx = pfcrx; 842 843 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 844 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 845 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 846 847 mlx4_free_cmd_mailbox(dev, mailbox); 848 return err; 849 } 850 EXPORT_SYMBOL(mlx4_SET_PORT_general); 851 852 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, 853 u8 promisc) 854 { 855 struct mlx4_cmd_mailbox *mailbox; 856 struct mlx4_set_port_rqp_calc_context *context; 857 int err; 858 u32 in_mod; 859 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? 
860 MCAST_DIRECT : MCAST_DEFAULT; 861 862 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 863 return 0; 864 865 mailbox = mlx4_alloc_cmd_mailbox(dev); 866 if (IS_ERR(mailbox)) 867 return PTR_ERR(mailbox); 868 context = mailbox->buf; 869 memset(context, 0, sizeof *context); 870 871 context->base_qpn = cpu_to_be32(base_qpn); 872 context->n_mac = dev->caps.log_num_macs; 873 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | 874 base_qpn); 875 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | 876 base_qpn); 877 context->intra_no_vlan = 0; 878 context->no_vlan = MLX4_NO_VLAN_IDX; 879 context->intra_vlan_miss = 0; 880 context->vlan_miss = MLX4_VLAN_MISS_IDX; 881 882 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; 883 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 884 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 885 886 mlx4_free_cmd_mailbox(dev, mailbox); 887 return err; 888 } 889 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); 890 891 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) 892 { 893 struct mlx4_cmd_mailbox *mailbox; 894 struct mlx4_set_port_prio2tc_context *context; 895 int err; 896 u32 in_mod; 897 int i; 898 899 mailbox = mlx4_alloc_cmd_mailbox(dev); 900 if (IS_ERR(mailbox)) 901 return PTR_ERR(mailbox); 902 context = mailbox->buf; 903 memset(context, 0, sizeof *context); 904 905 for (i = 0; i < MLX4_NUM_UP; i += 2) 906 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; 907 908 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port; 909 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 910 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 911 912 mlx4_free_cmd_mailbox(dev, mailbox); 913 return err; 914 } 915 EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC); 916 917 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, 918 u8 *pg, u16 *ratelimit) 919 { 920 struct mlx4_cmd_mailbox *mailbox; 921 struct mlx4_set_port_scheduler_context *context; 922 int err; 923 u32 in_mod; 924 int i; 925 
926 mailbox = mlx4_alloc_cmd_mailbox(dev); 927 if (IS_ERR(mailbox)) 928 return PTR_ERR(mailbox); 929 context = mailbox->buf; 930 memset(context, 0, sizeof *context); 931 932 for (i = 0; i < MLX4_NUM_TC; i++) { 933 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; 934 u16 r = ratelimit && ratelimit[i] ? ratelimit[i] : 935 MLX4_RATELIMIT_DEFAULT; 936 937 tc->pg = htons(pg[i]); 938 tc->bw_precentage = htons(tc_tx_bw[i]); 939 940 tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS); 941 tc->max_bw_value = htons(r); 942 } 943 944 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port; 945 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 946 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 947 948 mlx4_free_cmd_mailbox(dev, mailbox); 949 return err; 950 } 951 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); 952 953 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, 954 struct mlx4_vhcr *vhcr, 955 struct mlx4_cmd_mailbox *inbox, 956 struct mlx4_cmd_mailbox *outbox, 957 struct mlx4_cmd_info *cmd) 958 { 959 int err = 0; 960 961 return err; 962 } 963 964 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, 965 u64 mac, u64 clear, u8 mode) 966 { 967 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, 968 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, 969 MLX4_CMD_WRAPPED); 970 } 971 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR); 972 973 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, 974 struct mlx4_vhcr *vhcr, 975 struct mlx4_cmd_mailbox *inbox, 976 struct mlx4_cmd_mailbox *outbox, 977 struct mlx4_cmd_info *cmd) 978 { 979 int err = 0; 980 981 return err; 982 } 983 984 int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, 985 u32 in_mod, struct mlx4_cmd_mailbox *outbox) 986 { 987 return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, 988 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 989 MLX4_CMD_NATIVE); 990 } 991 992 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, 993 struct mlx4_vhcr *vhcr, 994 struct mlx4_cmd_mailbox *inbox, 995 
struct mlx4_cmd_mailbox *outbox, 996 struct mlx4_cmd_info *cmd) 997 { 998 if (slave != dev->caps.function) 999 return 0; 1000 return mlx4_common_dump_eth_stats(dev, slave, 1001 vhcr->in_modifier, outbox); 1002 } 1003 1004 void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap) 1005 { 1006 if (!mlx4_is_mfunc(dev)) { 1007 *stats_bitmap = 0; 1008 return; 1009 } 1010 1011 *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK | 1012 MLX4_STATS_TRAFFIC_DROPS_MASK | 1013 MLX4_STATS_PORT_COUNTERS_MASK); 1014 1015 if (mlx4_is_master(dev)) 1016 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; 1017 } 1018 EXPORT_SYMBOL(mlx4_set_stats_bitmap); 1019