/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
			   struct ib_mad *in_mad)
{
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return true;
	return dev->mdev->port_caps[port_num - 1].has_smi;
}

int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	u8 op_modifier = 0;

	if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
		return -EPERM;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}
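/*
 * process_mad() below returns the standard ib_mad agent bitmask:
 * IB_MAD_RESULT_SUCCESS alone means the MAD was accepted but needs no
 * reply, IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY asks the core to
 * send out_mad back to the requester, and IB_MAD_RESULT_CONSUMED stops
 * any further processing (used here for traps and trap represses).
 */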
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		       const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	 MLX5_GET64(query_vport_counter_out, p, cntr2))

	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}
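/*
 * pma_cnt_assign() serves the 32-bit PMA PortCounters attribute from a
 * PPCNT register dump: MLX5_ADDR_OF() locates the IB port counters
 * group inside the register layout, and MLX5_GET_BE() copies each
 * big-endian field, with typeof() sizing the access to match the
 * destination member of struct ib_pma_portcounters.
 */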
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters will be reported in
	 * their 64bit form via ib_pma_portcounters_ext by default.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name)	{		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}

static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err;
	void *out_cnt;

	/* Declaring support of extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

		out_cnt = mlx5_vzalloc(sz);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
						    port_num, out_cnt, sz);
		if (!err)
			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = mlx5_vzalloc(sz);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
					       out_cnt, sz);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}

	kvfree(out_cnt);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
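/*
 * Entry point for MADs arriving from the ib_core stack. Performance
 * management GET requests are answered directly from hardware counters
 * (vport counters or PPCNT) when the device reports the vport_counters
 * capability; everything else goes to firmware via the MAD_IFC command.
 */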
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	memset(out_mad->data, 0, sizeof(out_mad->data));

	if (MLX5_CAP_GEN(mdev, vport_counters) &&
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
		return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
	} else {
		return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
				   in_mad, out_mad);
	}
}

int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
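/*
 * The byte offsets into out_mad->data used by the NodeInfo-based
 * helpers above and below (4 for SystemImageGUID, 12 for NodeGUID,
 * 28 for PartitionCap) follow the IBTA NodeInfo attribute layout.
 */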
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
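/*
 * mlx5_query_mad_ifc_port() decodes a raw PortInfo SMP by byte offset
 * (e.g. LID at offset 16, PortState in the low nibble of byte 32),
 * then patches active_speed: the extended-speed field (byte 62)
 * overrides it with FDR/EDR encodings, and a reported QDR speed is
 * re-checked against ExtendedPortInfo to detect FDR-10.
 */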
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props being zeroed by the caller, avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If the reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
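/*
 * A minimal usage sketch, assuming the caller-side wiring found in the
 * driver's main.c (illustrative only, not verbatim): when the port's
 * vport access method resolves to MAD, the ib_device query callbacks
 * simply delegate to the helpers above, e.g.:
 *
 *	if (mlx5_use_mad_ifc(dev))
 *		return mlx5_query_mad_ifc_port(ibdev, port, props);
 *
 * mlx5_use_mad_ifc() is a stand-in name here for whatever predicate
 * the driver uses to select the MAD path.
 */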