1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"

/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);

/* Query a vport's state via QUERY_VPORT_STATE (opmod selects the flavor).
 * Returns the state field on success; on command failure returns 0 and the
 * error is not propagated to the caller.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
	int err;

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	/* other_vport must be set when addressing any vport but our own (0) */
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
	if (err)
		return 0;

	return MLX5_GET(query_vport_state_out, out, state);
}

/* Set a vport's administrative state via MODIFY_VPORT_STATE. */
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 other_vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}

/* Common helper: run QUERY_NIC_VPORT_CONTEXT for @vport into @out.
 * @out must be at least query_nic_vport_context_out bytes.
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}

/* Read @vport's minimum WQE inline mode from its NIC vport context. */
int
mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out);
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

/* Resolve the effective minimum inline mode for our own vport from the
 * device's wqe_inline_mode capability.
 * NOTE(review): no default case — *min_inline_mode is left untouched for
 * capability values outside the three handled here.
 */
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		/* per-vport mode: ask the device; fall back to L2 on error */
		if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
			break;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

/* Program @min_inline as @vport's minimum WQE inline mode.
 * other_vport is always set, so this addresses a vport other than our own.
 */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}

/* Read a vport's permanent MAC address from its NIC vport context.
 * @other selects whether @vport addresses another vport than our own.
 */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, bool other, u8 *addr)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u8 *out_addr;
	int err;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);

	err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
	if (!err)
		/* +2: the MAC occupies the low 6 bytes of the 8-byte field */
		ether_addr_copy(addr, &out_addr[2]);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

/* Convenience wrapper: our own (vport 0) permanent MAC address. */
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);

/* Set @vport's permanent MAC address (always addresses another vport). */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, const u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	/* +2: the MAC goes into the low 6 bytes of the 8-byte field */
	ether_addr_copy(&perm_mac[2], addr);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

/* Read our own vport's MTU from the NIC vport context. */
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

/* Set our own vport's MTU in the NIC vport context. */
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

/* Query @vport's UC or MC allowed-address list.
 * On entry *list_size is the caller's capacity (clamped to the device
 * capability); on success it is updated to the count the firmware reported
 * and that many MACs are copied into @addr_list.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
			1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* NOTE(review): the output buffer is sized with the _in struct;
	 * looks like query_nic_vport_context_out was intended — confirm
	 * against the mlx5_ifc layouts.
	 */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* firmware reports how many entries it actually returned */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* +2: MAC is in the low 6 bytes of each 8-byte layout entry */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

/* Replace our vport's UC or MC allowed-address list with @addr_list. */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
			1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* input carries list_size trailing mac_address_layout entries */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* +2: MAC occupies the low 6 bytes of each layout entry */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

/* Replace our vport's allowed-VLAN list with @vlans. */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* input carries list_size trailing vlan_layout entries */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx =
		MLX5_ADDR_OF(modify_nic_vport_context_in, in,
			     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* VLAN entries reuse the current_uc_mac_address slot array */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

/* Read our vport's system image GUID.
 * NOTE(review): the query's return value is ignored and 0 is always
 * returned; on command failure *system_image_guid is read from a zeroed
 * buffer — consider propagating the error.
 */
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

/* Read our vport's node GUID (query errors silently ignored, as above). */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

/* Set another vport's node GUID.  Vport 0 (ourselves) is rejected and the
 * caller must be a vport group manager.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u16 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}

/* Read our vport's Q_Key violation counter.
 * NOTE(review): query errors are silently ignored here too.
 */
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

/* Query one GID (or, for gid_index == 0xffff, the whole table) of an HCA
 * vport.  Querying another vport requires vport group manager capability.
 * Only the first returned GID is copied into @gid.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "query the whole table" */
	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* returned GID entries are appended after the base out struct */
	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		/* only a vport group manager may query other vports */
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* first GID entry lives right after the fixed-size out struct */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

/* Query one P_Key (or the whole table for pkey_index == 0xffff) of an HCA
 * vport into the caller's @pkey array; other vports require vport group
 * manager capability.
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "query the whole table" */
	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		/* only a vport group manager may query other vports */
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* copy each returned pkey entry out to the caller's array */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

/* Query an HCA vport context and unpack every field into @rep.
 * Other vports require vport group manager capability.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	/* NOTE(review): declared int[] rather than the u32[] used for every
	 * other command buffer in this file — same size, but inconsistent.
	 */
	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

/* Read the HCA's system image GUID (our own vport, port 1). */
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

/* Read the HCA's node GUID (our own vport, port 1). */
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

/* Read @vport's promiscuous flags (UC/MC/all) from its NIC vport context. */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

/* Program our vport's promiscuous flags (UC/MC/all). */
int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

/* Bit positions used to build the local-loopback status value below */
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};

/* Enable/disable unicast and multicast local loopback for our vport.
 * A no-op (returns 0) when the device can disable neither kind.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* hardware fields are "disable" flags, so invert the request */
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	/* only select the fields the device actually supports */
	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);

/* Report whether local loopback is enabled: *status is true only when
 * neither UC nor MC loopback is disabled in the NIC vport context.
 */
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto out;

	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	*status = !value;

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED = 1,
};

/* Program the roce_en bit of our NIC vport context. */
static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}

/* Reference-counted RoCE enable: the device bit is only touched on the
 * 0 -> 1 transition.  Serialized by mlx5_roce_en_lock.
 */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

/* Reference-counted RoCE disable: the device bit is cleared on the
 * 1 -> 0 transition, and the refcount restored if that fails.
 */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (mdev->roce.roce_en) {
		mdev->roce.roce_en--;
		if (mdev->roce.roce_en == 0)
			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);

		if (err)
			mdev->roce.roce_en++;
	}
	mutex_unlock(&mlx5_roce_en_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);

/* Run QUERY_VPORT_COUNTER for our own vport or (group manager only) for
 * VF @vf; the caller supplies a query_vport_counter_out-sized @out buffer.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		return err;
	}

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			/* VF n is vport n + 1 (vport 0 is the PF) */
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

/* Read the rx/tx discard-while-vport-down counters via QUERY_VNIC_ENV. */
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u8 other_vport, u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
	MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);

	err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
	if (err)
		return err;

	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.receive_discard_vport_down);
	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.transmit_discard_vport_down);
	return 0;
}

/* Modify an HCA vport context.  Only the fields flagged in
 * req->field_select (plus cap_mask1/cap_mask1_perm, which are always
 * written) are applied; other vports require vport group manager capability.
 */
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	int is_group_manager;
	void *ctx;
	void *in;
	int err;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
		MLX5_SET(hca_vport_context, ctx, vport_state_policy,
			 req->policy);
	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
		MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
		MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);

/* Affiliate @port_mdev's NIC vport with @master_mdev's vhca (multiport
 * RoCE): takes a RoCE reference on the port and rolls it back on failure.
 */
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
				       struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = mlx5_nic_vport_enable_roce(port_mdev);
	if (err)
		goto free;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id,
		 MLX5_CAP_GEN(master_mdev, vhca_id));
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria,
		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
	if (err)
		mlx5_nic_vport_disable_roce(port_mdev);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);

/* Undo multiport affiliation and, on success, drop the RoCE reference
 * taken by mlx5_nic_vport_affiliate_multiport().
 */
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id, 0);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria, 0);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
	if (!err)
		mlx5_nic_vport_disable_roce(port_mdev);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);

/* Return the system image GUID, caching it in mdev on first read.
 * Ethernet devices use the NIC vport query, others the HCA vport query;
 * query errors are ignored and leave the cached value as 0.
 */
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
	int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	u64 tmp = 0;

	if (mdev->sys_image_guid)
		return mdev->sys_image_guid;

	if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
		mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
	else
		mlx5_query_hca_vport_system_image_guid(mdev, &tmp);

	mdev->sys_image_guid = tmp;

	return tmp;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);

/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev: Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns total number of vports for
 * the eswitch.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);