/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"

static int
bnxt_dl_flash_update(struct devlink *dl, const char *filename,
		     const char *region, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);

	if (region)
		return -EOPNOTSUPP;

	if (!BNXT_PF(bp)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flash update not supported from a VF");
		return -EPERM;
	}

	return bnxt_flash_package_from_file(bp->dev, filename, 0);
}

/* Read the firmware health and reset-count registers and report the
 * current status through the devlink "fw" health reporter.
 */
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
				     struct devlink_fmsg *fmsg,
				     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);
	u32 val, health_status;
	int rc;

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return 0;

	val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
	health_status = val & 0xffff;

	if (health_status < BNXT_FW_STATUS_HEALTHY) {
		rc = devlink_fmsg_string_pair_put(fmsg, "Description",
						  "Not yet completed initialization");
		if (rc)
			return rc;
	} else if (health_status > BNXT_FW_STATUS_HEALTHY) {
		rc = devlink_fmsg_string_pair_put(fmsg, "Description",
						  "Encountered fatal error and cannot recover");
		if (rc)
			return rc;
	}

	if (val >> 16) {
		rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
		if (rc)
			return rc;
	}

	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
	rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
	if (rc)
		return rc;

	return 0;
}

static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
	.name = "fw",
	.diagnose = bnxt_fw_reporter_diagnose,
};

static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
				 void *priv_ctx,
				 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);

	if (!priv_ctx)
		return -EOPNOTSUPP;

	bnxt_fw_reset(bp);
	return 0;
}

static const
struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
	.name = "fw_reset",
	.recover = bnxt_fw_reset_recover,
};

static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
				 void *priv_ctx,
				 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);
	struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
	unsigned long event;

	if (!priv_ctx)
		return -EOPNOTSUPP;

	bp->fw_health->fatal = true;
	event = fw_reporter_ctx->sp_event;
	if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
		bnxt_fw_reset(bp);
	else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
		bnxt_fw_exception(bp);

	return 0;
}

static const
struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
	.name = "fw_fatal",
	.recover = bnxt_fw_fatal_recover,
};

/* Create the devlink health reporters: "fw_reset" depends on the firmware
 * hot-reset capability; "fw" and "fw_fatal" depend on the firmware
 * error-recovery capability.
 */
void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
	struct bnxt_fw_health *health = bp->fw_health;

	if (!bp->dl || !health)
		return;

	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) ||
	    health->fw_reset_reporter)
		goto err_recovery;

	health->fw_reset_reporter =
		devlink_health_reporter_create(bp->dl,
					       &bnxt_dl_fw_reset_reporter_ops,
					       0, true, bp);
	if (IS_ERR(health->fw_reset_reporter)) {
		netdev_warn(bp->dev, "Failed to create FW reset health reporter, rc = %ld\n",
			    PTR_ERR(health->fw_reset_reporter));
		health->fw_reset_reporter = NULL;
		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
	}

err_recovery:
	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return;

	if (!health->fw_reporter) {
		health->fw_reporter =
			devlink_health_reporter_create(bp->dl,
						       &bnxt_dl_fw_reporter_ops,
						       0, false, bp);
		if (IS_ERR(health->fw_reporter)) {
			netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
				    PTR_ERR(health->fw_reporter));
			health->fw_reporter = NULL;
			bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
			return;
		}
	}

	if (health->fw_fatal_reporter)
		return;

	health->fw_fatal_reporter =
		devlink_health_reporter_create(bp->dl,
					       &bnxt_dl_fw_fatal_reporter_ops,
					       0, true, bp);
	if (IS_ERR(health->fw_fatal_reporter)) {
		netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
			    PTR_ERR(health->fw_fatal_reporter));
		health->fw_fatal_reporter = NULL;
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	}
}

void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
{
	struct bnxt_fw_health *health = bp->fw_health;

	if (!bp->dl || !health)
		return;

	if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
	    health->fw_reset_reporter) {
		devlink_health_reporter_destroy(health->fw_reset_reporter);
		health->fw_reset_reporter = NULL;
	}

	if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
		return;

	if (health->fw_reporter) {
		devlink_health_reporter_destroy(health->fw_reporter);
		health->fw_reporter = NULL;
	}

	if (health->fw_fatal_reporter) {
		devlink_health_reporter_destroy(health->fw_fatal_reporter);
		health->fw_fatal_reporter = NULL;
	}
}

void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct bnxt_fw_reporter_ctx fw_reporter_ctx;

	fw_reporter_ctx.sp_event = event;
	switch (event) {
	case BNXT_FW_RESET_NOTIFY_SP_EVENT:
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
			if (!fw_health->fw_fatal_reporter)
				return;

			devlink_health_report(fw_health->fw_fatal_reporter,
					      "FW fatal async event received",
					      &fw_reporter_ctx);
			return;
		}
		if (!fw_health->fw_reset_reporter)
			return;

		devlink_health_report(fw_health->fw_reset_reporter,
				      "FW non-fatal reset event received",
				      &fw_reporter_ctx);
		return;

	case BNXT_FW_EXCEPTION_SP_EVENT:
		if (!fw_health->fw_fatal_reporter)
			return;

		devlink_health_report(fw_health->fw_fatal_reporter,
				      "FW fatal error reported",
				      &fw_reporter_ctx);
		return;
	}
}

void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
{
	struct bnxt_fw_health *health = bp->fw_health;
	u8 state;

	if (healthy)
		state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
	else
		state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;

	if (health->fatal)
		devlink_health_reporter_state_update(health->fw_fatal_reporter,
						     state);
	else
		devlink_health_reporter_state_update(health->fw_reset_reporter,
						     state);
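	/* The fatal flag only selects which reporter to update above; it is
	 * a one-shot indicator, so clear it once the state has been reported.
	 */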
	health->fatal = false;
}

static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
	.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
	.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
	.flash_update = bnxt_dl_flash_update,
};

static const struct devlink_ops bnxt_vf_dl_ops;

enum bnxt_dl_param_id {
	BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
};

/* Each entry: devlink param id, NVM offset, NVM config area, value width
 * in NVM (bits), value width on the devlink side (bytes).
 */
static const struct bnxt_dl_nvm_param nvm_params[] = {
	{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
	 BNXT_NVM_SHARED_CFG, 1, 1},
	{DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
	 BNXT_NVM_SHARED_CFG, 1, 1},
	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
	 NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
	 NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
	{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
	 BNXT_NVM_SHARED_CFG, 1, 1},
};

union bnxt_nvm_data {
	u8	val8;
	__le32	val32;
};

static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
				  union devlink_param_value *src,
				  int nvm_num_bits, int dl_num_bytes)
{
	u32 val32 = 0;

	if (nvm_num_bits == 1) {
		dst->val8 = src->vbool;
		return;
	}
	if (dl_num_bytes == 4)
		val32 = src->vu32;
	else if (dl_num_bytes == 2)
		val32 = (u32)src->vu16;
	else if (dl_num_bytes == 1)
		val32 = (u32)src->vu8;
	dst->val32 = cpu_to_le32(val32);
}

static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
				    union bnxt_nvm_data *src,
				    int nvm_num_bits, int dl_num_bytes)
{
	u32 val32;

	if (nvm_num_bits == 1) {
		dst->vbool = src->val8;
		return;
	}
	val32 = le32_to_cpu(src->val32);
	if (dl_num_bytes == 4)
		dst->vu32 = val32;
	else if (dl_num_bytes == 2)
		dst->vu16 = (u16)val32;
	else if (dl_num_bytes == 1)
		dst->vu8 = (u8)val32;
}

/* Look up the NVM descriptor for @param_id and send the prepared
 * HWRM_NVM_GET_VARIABLE or HWRM_NVM_SET_VARIABLE request, moving the
 * value through a small DMA buffer shared with the firmware.
 */
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
			     int msg_len, union devlink_param_value *val)
{
	struct hwrm_nvm_get_variable_input *req = msg;
	struct bnxt_dl_nvm_param nvm_param;
	union bnxt_nvm_data *data;
	dma_addr_t data_dma_addr;
	int idx = 0, rc, i;

	/* Get/Set NVM CFG parameter is supported only on PFs */
	if (BNXT_VF(bp))
		return -EPERM;

	for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
		if (nvm_params[i].id == param_id) {
			nvm_param = nvm_params[i];
			break;
		}
	}

	if (i == ARRAY_SIZE(nvm_params))
		return -EOPNOTSUPP;

	if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
		idx = bp->pf.port_id;
	else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
		idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;

	data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
				  &data_dma_addr, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	req->dest_data_addr = cpu_to_le64(data_dma_addr);
	req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
	req->option_num = cpu_to_le16(nvm_param.offset);
	req->index_0 = cpu_to_le16(idx);
	if (idx)
		req->dimensions = cpu_to_le16(1);

	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
		bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
				      nvm_param.dl_num_bytes);
		rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
	} else {
		rc = hwrm_send_message_silent(bp, msg, msg_len,
					      HWRM_CMD_TIMEOUT);
		if (!rc) {
			bnxt_copy_from_nvm_data(val, data,
						nvm_param.nvm_num_bits,
						nvm_param.dl_num_bytes);
		} else {
			struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;

			if (resp->cmd_err ==
			    NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
				rc = -EOPNOTSUPP;
		}
	}
	dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
	if (rc == -EACCES)
		netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
	return rc;
}

static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct hwrm_nvm_get_variable_input req = {0};
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
	rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
	/* NVM stores "GRE version check disabled"; devlink exposes the
	 * positive "gre_ver_check", so invert the boolean.
	 */
	if (!rc)
		if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
			ctx->val.vbool = !ctx->val.vbool;

	return rc;
}

static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct hwrm_nvm_set_variable_input req = {0};
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);

	/* Invert back: devlink "check enabled" -> NVM "check disabled" bit */
	if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
		ctx->val.vbool = !ctx->val.vbool;

	return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
}

static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	int max_val = -1;

	if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
		max_val = BNXT_MSIX_VEC_MAX;

	if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
		max_val = BNXT_MSIX_VEC_MIN_MAX;

	if (val.vu32 > max_val) {
		NL_SET_ERR_MSG_MOD(extack, "MSIX value exceeds the supported range");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param bnxt_dl_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
	DEVLINK_PARAM_GENERIC(IGNORE_ARI,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_msix_validate),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_msix_validate),
	DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
			     "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			     bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			     NULL),
};

static const struct devlink_param bnxt_dl_port_params[] = {
};

int bnxt_dl_register(struct bnxt *bp)
{
	struct devlink *dl;
	int rc;

	if (bp->hwrm_spec_code < 0x10600) {
		netdev_warn(bp->dev, "Firmware does not support NVM params");
		return -ENOTSUPP;
	}

	if (BNXT_PF(bp))
		dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
	else
		dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
	if (!dl) {
		netdev_warn(bp->dev, "devlink_alloc failed");
		return -ENOMEM;
	}

	bnxt_link_bp_to_dl(bp, dl);

	/* Add switchdev eswitch mode setting, if SRIOV supported */
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
	    bp->hwrm_spec_code > 0x10803)
		bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;

	rc = devlink_register(dl, &bp->pdev->dev);
	if (rc) {
		netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
		goto err_dl_free;
	}

	if (!BNXT_PF(bp))
		return 0;

	rc = devlink_params_register(dl, bnxt_dl_params,
				     ARRAY_SIZE(bnxt_dl_params));
	if (rc) {
		netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
			    rc);
		goto err_dl_unreg;
	}

	devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       bp->pf.port_id, false, 0,
			       bp->switch_id, sizeof(bp->switch_id));
	rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
	if (rc) {
		netdev_err(bp->dev, "devlink_port_register failed");
		goto err_dl_param_unreg;
	}
	devlink_port_type_eth_set(&bp->dl_port, bp->dev);

	rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
					  ARRAY_SIZE(bnxt_dl_port_params));
	if (rc) {
		netdev_err(bp->dev, "devlink_port_params_register failed");
		goto err_dl_port_unreg;
	}

	devlink_params_publish(dl);

	return 0;

err_dl_port_unreg:
	devlink_port_unregister(&bp->dl_port);
err_dl_param_unreg:
	devlink_params_unregister(dl, bnxt_dl_params,
				  ARRAY_SIZE(bnxt_dl_params));
err_dl_unreg:
	devlink_unregister(dl);
err_dl_free:
	bnxt_link_bp_to_dl(bp, NULL);
	devlink_free(dl);
	return rc;
}

void bnxt_dl_unregister(struct bnxt *bp)
{
	struct devlink *dl = bp->dl;

	if (!dl)
		return;

	if (BNXT_PF(bp)) {
		devlink_port_params_unregister(&bp->dl_port,
					       bnxt_dl_port_params,
					       ARRAY_SIZE(bnxt_dl_port_params));
		devlink_port_unregister(&bp->dl_port);
		devlink_params_unregister(dl, bnxt_dl_params,
					  ARRAY_SIZE(bnxt_dl_params));
	}
	devlink_unregister(dl);
	devlink_free(dl);
}