1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"

/* Component identifiers reported by the MCQS register.
 * NOTE(review): names follow the firmware component taxonomy (boot image,
 * NV config blobs, tokens, gearbox) — confirm exact semantics against the
 * device PRM; only BOOT_IMG is consumed in this file.
 */
enum {
	MCQS_IDENTIFIER_BOOT_IMG	= 0x1,
	MCQS_IDENTIFIER_OEM_NVCONFIG	= 0x4,
	MCQS_IDENTIFIER_MLNX_NVCONFIG	= 0x5,
	MCQS_IDENTIFIER_CS_TOKEN	= 0x6,
	MCQS_IDENTIFIER_DBG_TOKEN	= 0x7,
	MCQS_IDENTIFIER_GEARBOX		= 0xA,
};

/* component_update_state values reported by MCQS; only IDLE and
 * ACTIVE_PENDING_RESET are interpreted here (see mlx5_fw_image_pending()).
 */
enum {
	MCQS_UPDATE_STATE_IDLE,
	MCQS_UPDATE_STATE_IN_PROGRESS,
	MCQS_UPDATE_STATE_APPLIED,
	MCQS_UPDATE_STATE_ACTIVE,
	MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET,
	MCQS_UPDATE_STATE_FAILED,
	MCQS_UPDATE_STATE_CANCELED,
	MCQS_UPDATE_STATE_BUSY,
};

/* info_type selector for the MCQI register query. */
enum {
	MCQI_INFO_TYPE_CAPABILITIES	  = 0x0,
	MCQI_INFO_TYPE_VERSION		  = 0x1,
	MCQI_INFO_TYPE_ACTIVATION_METHOD  = 0x5,
};

/* read_pending_component selector for MCQI version queries:
 * 0 = currently running version, 1 = stored (pending) version.
 */
enum {
	MCQI_FW_RUNNING_VERSION = 0,
	MCQI_FW_STORED_VERSION  = 1,
};

/* Issue QUERY_ADAPTER and cache the adapter board id in dev->board_id.
 * The id is copied from the vsd_contd_psid field — presumably the PSID
 * string (field name suggests it; confirm against the PRM).
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
	int err;

	/* Output struct is large; allocate rather than placing it on stack. */
	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
	err = mlx5_cmd_exec_inout(dev, query_adapter, in, out);
	if (err)
		goto out;

	memcpy(dev->board_id,
	       MLX5_ADDR_OF(query_adapter_out, out,
			    query_adapter_struct.vsd_contd_psid),
	       MLX5_FLD_SZ_BYTES(query_adapter_out,
				 query_adapter_struct.vsd_contd_psid));

out:
	kfree(out);
	return err;
}

/* Issue QUERY_ADAPTER and return the IEEE vendor id in *vendor_id.
 * *vendor_id is written only on success. Returns 0 or a negative errno.
 */
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
	err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out);
	if (err)
		goto out;

	*vendor_id = MLX5_GET(query_adapter_out, out,
			      query_adapter_struct.ieee_vendor_id);
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);

/* Cache the PCAM (ports capabilities mask) register group 0x5000-0x507f
 * into dev->caps.pcam.
 */
static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_pcam_reg(dev, dev->caps.pcam,
				   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_PCAM_REGS_5000_TO_507F);
}

/* Cache one MCAM (management capabilities mask) access-register group
 * into dev->caps.mcam[group].
 */
static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
					  enum mlx5_mcam_reg_groups group)
{
	return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
				   MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
}

/* Cache the QCAM (QoS capabilities mask) first-128 register group into
 * dev->caps.qcam.
 */
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_qcam_reg(dev, dev->caps.qcam,
				   MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_QCAM_REGS_FIRST_128);
}

/* Query and cache all HCA capability groups the device advertises.
 *
 * The general caps are queried first; each subsequent group is queried
 * only when the corresponding general-cap bit (or eswitch-manager role)
 * is set. Most failures abort with a negative errno, but the DEBUG,
 * PCAM, MCAM and QCAM queries are deliberately best-effort — their
 * return values are ignored so an old firmware lacking them does not
 * fail device load.
 */
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev, port_selection_cap)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, hca_cap_2)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, roce)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
		if (err)
			return err;
	}

	/* Flow table caps are needed for either NIC flow tables or
	 * enhanced IPoIB offloads.
	 */
	if (MLX5_CAP_GEN(dev, nic_flow_table) ||
	    MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vport_group_manager) &&
	    MLX5_ESWITCH_MANAGER(dev)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vector_calc)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, qos)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
		if (err)
			return err;
	}

	/* Best-effort from here through the QCAM query: errors ignored. */
	if (MLX5_CAP_GEN(dev, debug))
		mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);

	if (MLX5_CAP_GEN(dev, pcam_reg))
		mlx5_get_pcam_reg(dev);

	if (MLX5_CAP_GEN(dev, mcam_reg)) {
		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
	}

	if (MLX5_CAP_GEN(dev, qcam_reg))
		mlx5_get_qcam_reg(dev);

	if (MLX5_CAP_GEN(dev, device_memory)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, event_cap)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN_64(dev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipsec_offload)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, shampo)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN_64(dev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, adv_virtualization)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION);
		if (err)
			return err;
	}

	return 0;
}

/* Issue INIT_HCA. When supported, the 4-dword sw_owner_id is programmed,
 * and a positive sw_vhca_id is passed through when the (max) capability
 * advertises it. Returns the command status.
 */
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
	u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
	int i;

	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);

	if (MLX5_CAP_GEN(dev, sw_owner_id)) {
		for (i = 0; i < 4; i++)
			MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
				       sw_owner_id[i]);
	}

	if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
	    dev->priv.sw_vhca_id > 0)
		MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);

	return mlx5_cmd_exec_in(dev, init_hca, in);
}

/* Issue a plain (graceful) TEARDOWN_HCA. */
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	return mlx5_cmd_exec_in(dev, teardown_hca, in);
}

/* Issue TEARDOWN_HCA with the FORCE_CLOSE profile, polled (no events).
 * Returns -EOPNOTSUPP when the firmware lacks force teardown, -EIO when
 * the firmware reports the force state failed (caller falls back to a
 * normal teardown), or 0 on success.
 */
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	int force_state;
	int ret;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

	/* Polling exec: the event path may already be torn down here. */
	ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	force_state = MLX5_GET(teardown_hca_out, out, state);
	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
		return -EIO;
	}

	return 0;
}

/* Issue TEARDOWN_HCA with the PREPARE_FAST_TEARDOWN profile, then move
 * the NIC interface state to DISABLED and busy-wait (with cond_resched)
 * up to the TEARDOWN timeout for the device to acknowledge. Returns
 * -EOPNOTSUPP, -EIO on firmware/timeout failure, or 0 on success.
 */
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN);
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
	int state;
	int ret;

	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

	ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out);
	if (ret)
		return ret;

	state = MLX5_GET(teardown_hca_out, out, state);
	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with fast mode failed\n");
		return -EIO;
	}

	mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);

	/* Loop until device state turns to disable */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		cond_resched();
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
			mlx5_get_nic_state(dev), delay_ms);
		return -EIO;
	}

	return 0;
}

/* MCC register instruction opcodes driving the firmware-update FSM.
 * NOTE(review): the enum tag says "mlxsw" while the constants say MLX5 —
 * the naming is inconsistent but the tag is referenced below, so it is
 * kept as-is here.
 */
enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};

/* Write an MCC instruction (write access, last arg 1) to drive the
 * firmware-update state machine for a component/update handle pair.
 */
static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
			    enum mlxsw_reg_mcc_instruction instr,
			    u16 component_index, u32 update_handle,
			    u32 component_size)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];

	memset(in, 0, sizeof(in));

	MLX5_SET(mcc_reg, in, instruction, instr);
	MLX5_SET(mcc_reg, in, component_index, component_index);
	MLX5_SET(mcc_reg, in, update_handle, update_handle);
	MLX5_SET(mcc_reg, in, component_size, component_size);

	return mlx5_core_access_reg(dev, in, sizeof(in), out,
				    sizeof(out), MLX5_REG_MCC, 0, 1);
}

/* Query MCC (read access, last arg 0): reads back the update handle,
 * error code and control (FSM) state. All three out-params are written
 * only on success.
 */
static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
			      u32 *update_handle, u8 *error_code,
			      u8 *control_state)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(mcc_reg, in, update_handle, *update_handle);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MCC, 0, 0);
	if (err)
		goto out;

	*update_handle = MLX5_GET(mcc_reg, out, update_handle);
	*error_code = MLX5_GET(mcc_reg, out, error_code);
	*control_state = MLX5_GET(mcc_reg, out, control_state);

out:
	return err;
}

/* Write one data block of a firmware component via the MCDA register.
 * 'size' is assumed to be a multiple of 4 (the copy loop works in whole
 * dwords: dw_size = size >> 2) — callers are expected to honor the
 * alignment MCQI capabilities report; a trailing non-dword remainder
 * would be silently dropped here. Data is byte-swapped per dword to
 * big-endian before being placed in the register payload.
 */
static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
			     u32 update_handle,
			     u32 offset, u16 size,
			     u8 *data)
{
	int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
	u32 out[MLX5_ST_SZ_DW(mcda_reg)];
	int i, j, dw_size = size >> 2;
	__be32 data_element;
	u32 *in;

	in = kzalloc(in_size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(mcda_reg, in, update_handle, update_handle);
	MLX5_SET(mcda_reg, in, offset, offset);
	MLX5_SET(mcda_reg, in, size, size);

	for (i = 0; i < dw_size; i++) {
		j = i * 4;
		data_element = htonl(*(u32 *)&data[j]);
		memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
	}

	err = mlx5_core_access_reg(dev, in, in_size, out,
				   sizeof(out), MLX5_REG_MCDA, 0, 1);
	kfree(in);
	return err;
}

/* Query the MCQI register for a given component and info_type, copying
 * data_size bytes of the returned data section into mcqi_data. The
 * caller must size mcqi_data for data_size and keep data_size within
 * the mcqi_reg_data union.
 */
static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
			       u16 component_index, bool read_pending,
			       u8 info_type, u16 data_size, void *mcqi_data)
{
	u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_UN_SZ_DW(mcqi_reg_data)] = {};
	u32 in[MLX5_ST_SZ_DW(mcqi_reg)] = {};
	void *data;
	int err;

	MLX5_SET(mcqi_reg, in, component_index, component_index);
	MLX5_SET(mcqi_reg, in, read_pending_component, read_pending);
	MLX5_SET(mcqi_reg, in, info_type, info_type);
	MLX5_SET(mcqi_reg, in, data_size, data_size);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   MLX5_ST_SZ_BYTES(mcqi_reg) + data_size,
				   MLX5_REG_MCQI, 0, 0);
	if (err)
		return err;

	data = MLX5_ADDR_OF(mcqi_reg, out, data);
	memcpy(mcqi_data, data, data_size);

	return 0;
}

/* Query MCQI CAPABILITIES for a component: max component size, the log
 * of the MCDA word size (alignment) and the max MCDA write size. Out
 * params are written only on success.
 */
static int mlx5_reg_mcqi_caps_query(struct mlx5_core_dev *dev, u16 component_index,
				    u32 *max_component_size, u8 *log_mcda_word_size,
				    u16 *mcda_max_write_size)
{
	u32 mcqi_reg[MLX5_ST_SZ_DW(mcqi_cap)] = {};
	int err;

	err = mlx5_reg_mcqi_query(dev, component_index, 0,
				  MCQI_INFO_TYPE_CAPABILITIES,
				  MLX5_ST_SZ_BYTES(mcqi_cap), mcqi_reg);
	if (err)
		return err;

	*max_component_size = MLX5_GET(mcqi_cap, mcqi_reg, max_component_size);
	*log_mcda_word_size = MLX5_GET(mcqi_cap, mcqi_reg, log_mcda_word_size);
	*mcda_max_write_size = MLX5_GET(mcqi_cap, mcqi_reg, mcda_max_write_size);

	return 0;
}

/* Adapter glue binding the generic mlxfw flashing framework to an mlx5
 * device; the ops below recover the mlx5_core_dev via container_of.
 */
struct mlx5_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlx5_core_dev *mlx5_core_dev;
};

/* mlxfw op: report a component's size/alignment/write-chunk limits.
 * Requires MCAM and the MCQI register capability.
 */
static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
				u16 component_index, u32 *p_max_size,
				u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi)) {
		mlx5_core_warn(dev, "caps query isn't supported by running FW\n");
		return -EOPNOTSUPP;
	}

	return mlx5_reg_mcqi_caps_query(dev, component_index, p_max_size,
					p_align_bits, p_max_write_size);
}

/* mlxfw op: acquire the firmware-update handle. Queries the current FSM
 * state first and refuses with -EBUSY unless it is IDLE, then issues
 * LOCK_UPDATE_HANDLE. *fwhandle is the handle returned by the query.
 */
static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	*fwhandle = 0;
	err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
	if (err)
		return err;

	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
				0, *fwhandle, 0);
}

/* mlxfw op: announce a component update of component_size bytes. */
static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index, u32 component_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
				component_index, fwhandle, component_size);
}

/* mlxfw op: download one block of component data at the given offset. */
static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				   u8 *data, u16 size, u32 offset)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
}

/* mlxfw op: ask firmware to verify the downloaded component. */
static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
				component_index, fwhandle, 0);
}

/* mlxfw op: activate the flashed image (component_index 0). */
static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
				fwhandle, 0);
}

/* mlxfw op: read the update FSM state and error code. The error code is
 * clamped to MLXFW_FSM_STATE_ERR_MAX so an unknown firmware value maps
 * to the framework's catch-all error.
 */
static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				enum mlxfw_fsm_state *fsm_state,
				enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
	if (err)
		return err;

	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw op: cancel an in-progress update; best-effort, result ignored. */
static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
}

/* mlxfw op: release the update handle; best-effort, result ignored. */
static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			 fwhandle, 0);
}

/* mlxfw op: trigger firmware image reactivation via the MIRC register
 * (write, then poll status every 20ms up to the FSM_REACTIVATE timeout).
 * On timeout the function still returns 0 with *status left at the last
 * (busy) value — the caller is expected to interpret *status.
 */
static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u32 out[MLX5_ST_SZ_DW(mirc_reg)];
	u32 in[MLX5_ST_SZ_DW(mirc_reg)];
	unsigned long exp_time;
	int err;

	exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE));

	if (!MLX5_CAP_MCAM_REG2(dev, mirc))
		return -EOPNOTSUPP;

	memset(in, 0, sizeof(in));

	/* Write MIRC to kick off reactivation. */
	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MIRC, 0, 1);
	if (err)
		return err;

	/* Poll MIRC until the status code leaves BUSY or we time out. */
	do {
		memset(out, 0, sizeof(out));
		err = mlx5_core_access_reg(dev, in, sizeof(in), out,
					   sizeof(out), MLX5_REG_MIRC, 0, 0);
		if (err)
			return err;

		*status = MLX5_GET(mirc_reg, out, status_code);
		if (*status != MLXFW_FSM_REACTIVATE_STATUS_BUSY)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	return 0;
}

static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
	.component_query	= mlx5_component_query,
	.fsm_lock		= mlx5_fsm_lock,
	.fsm_component_update	= mlx5_fsm_component_update,
	.fsm_block_download	= mlx5_fsm_block_download,
	.fsm_component_verify	= mlx5_fsm_component_verify,
	.fsm_activate		= mlx5_fsm_activate,
	.fsm_reactivate		= mlx5_fsm_reactivate,
	.fsm_query_state	= mlx5_fsm_query_state,
	.fsm_cancel		= mlx5_fsm_cancel,
	.fsm_release		= mlx5_fsm_release
};

/* Flash a firmware image through the generic mlxfw framework. Requires
 * MCAM plus the MCQI, MCC and MCDA register capabilities; otherwise
 * returns -EOPNOTSUPP. The device's board id (PSID) is handed to mlxfw
 * so it can match the image against the device.
 */
int mlx5_firmware_flash(struct mlx5_core_dev *dev,
			const struct firmware *firmware,
			struct netlink_ext_ack *extack)
{
	struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlx5_mlxfw_dev_ops,
			.psid = dev->board_id,
			.psid_size = strlen(dev->board_id),
			.devlink = priv_to_devlink(dev),
		},
		.mlx5_core_dev = dev
	};

	if (!MLX5_CAP_GEN(dev, mcam_reg) ||
	    !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcc) ||
	    !MLX5_CAP_MCAM_REG(dev, mcda)) {
		pr_info("%s flashing isn't supported by the running FW\n", __func__);
		return -EOPNOTSUPP;
	}

	return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev,
				    firmware, extack);
}

/* Query MCQI VERSION info for a component; read_pending selects running
 * (0) vs stored/pending (1) version. mcqi_version_out must hold an
 * mcqi_version struct.
 */
static int mlx5_reg_mcqi_version_query(struct mlx5_core_dev *dev,
				       u16 component_index, bool read_pending,
				       u32 *mcqi_version_out)
{
	return mlx5_reg_mcqi_query(dev, component_index, read_pending,
				   MCQI_INFO_TYPE_VERSION,
				   MLX5_ST_SZ_BYTES(mcqi_version),
				   mcqi_version_out);
}

/* Read the MCQS register for one component index into 'out' (which must
 * hold an mcqs_reg struct; it is zeroed here first).
 */
static int mlx5_reg_mcqs_query(struct mlx5_core_dev *dev, u32 *out,
			       u16 component_index)
{
	/* mcqs_reg is small; u8 comfortably holds its byte size. */
	u8 out_sz = MLX5_ST_SZ_BYTES(mcqs_reg);
	u32 in[MLX5_ST_SZ_DW(mcqs_reg)] = {};
	int err;

	memset(out, 0, out_sz);

	MLX5_SET(mcqs_reg, in, component_index, component_index);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   out_sz, MLX5_REG_MCQS, 0, 0);
	return err;
}

/* scans component index sequentially, to find the boot img index */
static int mlx5_get_boot_img_component_index(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(mcqs_reg)] = {};
	u16 identifier, component_idx = 0;
	bool quit;
	int err;

	/* Stop at the boot image, the device's last component, or u16
	 * wrap-around of component_idx (the "++component_idx" in the
	 * loop condition), whichever comes first.
	 */
	do {
		err = mlx5_reg_mcqs_query(dev, out, component_idx);
		if (err)
			return err;

		identifier = MLX5_GET(mcqs_reg, out, identifier);
		quit = !!MLX5_GET(mcqs_reg, out, last_index_flag);
		quit |= identifier == MCQS_IDENTIFIER_BOOT_IMG;
	} while (!quit && ++component_idx);

	if (identifier != MCQS_IDENTIFIER_BOOT_IMG) {
		mlx5_core_warn(dev, "mcqs: can't find boot_img component ix, last scanned idx %d\n",
			       component_idx);
		return -EOPNOTSUPP;
	}

	return component_idx;
}

/* Determine whether a pending (flashed but not yet activated) firmware
 * image exists for the given component. Sets *pending_version_exists for
 * the IDLE / ACTIVE_PENDING_RESET states; any other update state is
 * reported as -ENODATA since the pending version can't be read then.
 */
static int
mlx5_fw_image_pending(struct mlx5_core_dev *dev,
		      int component_index,
		      bool *pending_version_exists)
{
	u32 out[MLX5_ST_SZ_DW(mcqs_reg)];
	u8 component_update_state;
	int err;

	err = mlx5_reg_mcqs_query(dev, out, component_index);
	if (err)
		return err;

	component_update_state = MLX5_GET(mcqs_reg, out, component_update_state);

	if (component_update_state == MCQS_UPDATE_STATE_IDLE) {
		*pending_version_exists = false;
	} else if (component_update_state == MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET) {
		*pending_version_exists = true;
	} else {
		mlx5_core_warn(dev,
			       "mcqs: can't read pending fw version while fw state is %d\n",
			       component_update_state);
		return -ENODATA;
	}
	return 0;
}

/* Report the running and pending firmware versions of the boot image
 * component. *pending_ver is 0 when no pending image exists. Requires
 * MCAM plus the MCQI and MCQS register capabilities. Returns 0 or a
 * negative errno.
 */
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
			  u32 *running_ver, u32 *pending_ver)
{
	u32 reg_mcqi_version[MLX5_ST_SZ_DW(mcqi_version)] = {};
	bool pending_version_exists;
	int component_index;
	int err;

	if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcqs)) {
		mlx5_core_warn(dev, "fw query isn't supported by the FW\n");
		return -EOPNOTSUPP;
	}

	component_index = mlx5_get_boot_img_component_index(dev);
	if (component_index < 0)
		return component_index;

	err = mlx5_reg_mcqi_version_query(dev, component_index,
					  MCQI_FW_RUNNING_VERSION,
					  reg_mcqi_version);
	if (err)
		return err;

	*running_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

	err = mlx5_fw_image_pending(dev, component_index, &pending_version_exists);
	if (err)
		return err;

	if (!pending_version_exists) {
		*pending_ver = 0;
		return 0;
	}

	err = mlx5_reg_mcqi_version_query(dev, component_index,
					  MCQI_FW_STORED_VERSION,
					  reg_mcqi_version);
	if (err)
		return err;

	*pending_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

	return 0;
}