// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/vmalloc.h>

#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"

static int ice_active_port_option = -1;

/* context for devlink info version reporting */
struct ice_info_ctx {
	char buf[128];
	struct ice_orom_info pending_orom;
	struct ice_nvm_info pending_nvm;
	struct ice_netlist_info pending_netlist;
	struct ice_hw_dev_caps dev_caps;
};

/* The following functions are used to format specific strings for various
 * devlink info versions. The ctx parameter is used to provide the storage
 * buffer, as well as any ancillary information calculated when the info
 * request was made.
 *
 * If a version does not exist, for example when attempting to get the
 * inactive version of flash when there is no pending update, the function
 * should leave the buffer in the ctx structure empty.
 */

static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u8 dsn[8];

	/* Copy the DSN into an array in Big Endian format */
	put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

	snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}

static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;
	int status;

	status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
	if (status)
		/* We failed to locate the PBA, so just skip this entry */
		dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
			status);
}

static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}

static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
		 hw->api_min_ver, hw->api_patch);
}

static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
}

static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &pf->hw.flash.orom;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 orom->major, orom->build, orom->patch);
}

static void
ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
			  struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &ctx->pending_orom;

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
		snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
			 orom->major, orom->build, orom->patch);
}

static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
}

static void
ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
			 struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
			 nvm->major, nvm->minor);
}

static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
}

static void
ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
		 pkg->major, pkg->minor, pkg->update, pkg->draft);
}

static void
ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
}

static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	/* The netlist version fields are BCD formatted */
	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
		 netlist->major, netlist->minor,
		 netlist->type >> 16, netlist->type & 0xFFFF,
		 netlist->rev, netlist->cust_ver);
}

static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
			     struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	/* The netlist version fields are BCD formatted */
	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
			 netlist->major, netlist->minor,
			 netlist->type >> 16, netlist->type & 0xFFFF,
			 netlist->rev, netlist->cust_ver);
}

static void
ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
			       struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }

/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler. The stored entry will first try the pending handler, and fallback
 * to the active handler if the pending function does not report a version.
 * The pending handler should check the status of a pending update for the
 * relevant flash component. It should only fill in the buffer in the case
 * where a valid pending version is available. This ensures that the related
 * stored and running versions remain in sync, and that stored versions are
 * correctly reported as expected.
 */
#define combined(key, active, pending) \
	running(key, active), \
	stored(key, pending, active)

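/* For example,
 *   combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver)
 * expands into the two table entries
 *   running("fw.psid.api", ice_info_nvm_ver),
 *   stored("fw.psid.api", ice_info_pending_nvm_ver, ice_info_nvm_ver)
 */
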
enum ice_version_type {
	ICE_VERSION_FIXED,
	ICE_VERSION_RUNNING,
	ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
	enum ice_version_type type;
	const char *key;
	void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
	void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
	fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
	running("fw.mgmt.api", ice_info_fw_api),
	running("fw.mgmt.build", ice_info_fw_build),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
	running("fw.app.name", ice_info_ddp_pkg_name),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
	running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
	combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
	combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
};

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_info_ctx *ctx;
	size_t i;
	int err;

	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
		return err;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* discover capabilities first */
	err = ice_discover_dev_caps(hw, &ctx->dev_caps);
	if (err) {
		dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
		goto out_free_ctx;
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
		err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Option ROM */
			ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
		err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
		if (err) {
			dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending NVM */
			ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
		err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending netlist */
			ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
		}
	}

	ice_info_get_dsn(pf, ctx);

	err = devlink_info_serial_number_put(req, ctx->buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		goto out_free_ctx;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		memset(ctx->buf, 0, sizeof(ctx->buf));

		ice_devlink_versions[i].getter(pf, ctx);

		/* If the default getter doesn't report a version, use the
		 * fallback function. This is primarily useful in the case of
		 * "stored" versions that want to report the same value as the
		 * running version in the normal case of no pending update.
		 */
		if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
			ice_devlink_versions[i].fallback(pf, ctx);

		/* Do not report missing versions */
		if (ctx->buf[0] == '\0')
			continue;

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				goto out_free_ctx;
			}
			break;
		}
	}

out_free_ctx:
	kfree(ctx);
	return err;
}

/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Allow user to activate new Embedded Management Processor firmware by
 * issuing device specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happens automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 */
static int
ice_devlink_reload_empr_start(struct ice_pf *pf,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;

	/* pending is a bitmask of which flash banks have a pending update,
	 * including the main NVM bank, the Option ROM bank, and the netlist
	 * bank. If any of these bits are set, then there is a pending update
	 * waiting to be activated.
	 */
	if (!pending) {
		NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
		return -ECANCELED;
	}

	if (pf->fw_emp_reset_disabled) {
		NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
		return -ECANCELED;
	}

	dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

	err = ice_aq_nvm_update_empr(hw);
	if (err) {
		dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
		return err;
	}

	return 0;
}

/**
 * ice_devlink_reload_down - prepare for reload
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
			enum devlink_reload_action action,
			enum devlink_reload_limit limit,
			struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Go to legacy mode before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_is_adq_active(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Turn off ADQ before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_has_vfs(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Remove all VFs before doing reinit");
			return -EOPNOTSUPP;
		}
		ice_unload(pf);
		return 0;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		return ice_devlink_reload_empr_start(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Wait for driver to finish rebuilding after EMP reset is completed. This
 * includes time to wait for both the actual device reset as well as the time
 * for the driver's rebuild to complete.
 */
static int
ice_devlink_reload_empr_finish(struct ice_pf *pf,
			       struct netlink_ext_ack *extack)
{
	int err;

	err = ice_wait_for_reset(pf, 60 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
		return err;
	}

	return 0;
}

/**
 * ice_devlink_port_opt_speed_str - convert speed to a string
 * @speed: speed value
 */
static const char *ice_devlink_port_opt_speed_str(u8 speed)
{
	switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
	case ICE_AQC_PORT_OPT_MAX_LANE_100M:
		return "0.1";
	case ICE_AQC_PORT_OPT_MAX_LANE_1G:
		return "1";
	case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
		return "2.5";
	case ICE_AQC_PORT_OPT_MAX_LANE_5G:
		return "5";
	case ICE_AQC_PORT_OPT_MAX_LANE_10G:
		return "10";
	case ICE_AQC_PORT_OPT_MAX_LANE_25G:
		return "25";
	case ICE_AQC_PORT_OPT_MAX_LANE_50G:
		return "50";
	case ICE_AQC_PORT_OPT_MAX_LANE_100G:
		return "100";
	}

	return "-";
}

#define ICE_PORT_OPT_DESC_LEN	50
/**
 * ice_devlink_port_options_print - Print available port split options
 * @pf: the PF to print split port options
 *
 * Prints a table with available port split options and max port speeds
 */
static void ice_devlink_port_options_print(struct ice_pf *pf)
{
	u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
	struct ice_aqc_get_port_options_elem *options, *opt;
	struct device *dev = ice_pf_to_dev(pf);
	bool active_valid, pending_valid;
	char desc[ICE_PORT_OPT_DESC_LEN];
	const char *str;
	int status;

	options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
			  sizeof(*options), GFP_KERNEL);
	if (!options)
		return;

	for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
		opt = options + i * ICE_AQC_PORT_OPT_MAX;
		options_count = ICE_AQC_PORT_OPT_MAX;
		active_valid = 0;

		status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
						 i, true, &active_idx,
						 &active_valid, &pending_idx,
						 &pending_valid);
		if (status) {
			dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
				i, status);
			goto err;
		}
	}

	dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
	dev_dbg(dev, "Status  Split      Quad 0          Quad 1\n");
	dev_dbg(dev, "        count  L0  L1  L2  L3  L4  L5  L6  L7\n");

	for (i = 0; i < options_count; i++) {
		cnt = 0;

		if (i == ice_active_port_option)
			str = "Active";
		else if ((i == pending_idx) && pending_valid)
			str = "Pending";
		else
			str = "";

		cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
				"%-8s", str);

		cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
				"%-6u", options[i].pmd);

		for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
			speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
			str = ice_devlink_port_opt_speed_str(speed);
			cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
					"%3s ", str);
		}

		dev_dbg(dev, "%s\n", desc);
	}

err:
	kfree(options);
}

/**
 * ice_devlink_aq_set_port_option - Send set port option admin queue command
 * @pf: the PF to set the port option for
 * @option_idx: selected port option
 * @extack: extended netdev ack structure
 *
 * Sends set port option admin queue command with selected port option and
 * calls NVM write activate.
 */
static int
ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
			       struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	int status;

	status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
	if (status) {
		dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
			status, pf->hw.adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
		return -EIO;
	}

	status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, pf->hw.adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EIO;
	}

	status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
	if (status) {
		dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
			status, pf->hw.adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
		ice_release_nvm(&pf->hw);
		return -EIO;
	}

	ice_release_nvm(&pf->hw);

	NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
	return 0;
}

/**
 * ice_devlink_port_split - .port_split devlink handler
 * @devlink: devlink instance structure
 * @port: devlink port structure
 * @count: number of ports to split to
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .port_split operation.
 *
 * Unfortunately, the devlink expression of available options is limited
 * to just a number, so search for an FW port option which supports
 * the specified number. As there could be multiple FW port options with
 * the same port split count, allow switching between them. When the same
 * port split count request is issued again, switch to the next FW port
 * option with the same port split count.
 *
 * Return: zero on success or an error code on failure.
 */
static int
ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
		       unsigned int count, struct netlink_ext_ack *extack)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
	u8 i, j, active_idx, pending_idx, new_option;
	struct ice_pf *pf = devlink_priv(devlink);
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	struct device *dev = ice_pf_to_dev(pf);
	bool active_valid, pending_valid;
	int status;

	status = ice_aq_get_port_options(&pf->hw, options, &option_count,
					 0, true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status) {
		dev_dbg(dev, "Couldn't read port split options, err = %d\n",
			status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
		return -EIO;
	}

	new_option = ICE_AQC_PORT_OPT_MAX;
	active_idx = pending_valid ? pending_idx : active_idx;
	for (i = 1; i <= option_count; i++) {
		/* In order to allow switching between FW port options with
		 * the same port split count, search for a new option starting
		 * from the active/pending option (with array wrap around).
		 */
		j = (active_idx + i) % option_count;

		if (count == options[j].pmd) {
			new_option = j;
			break;
		}
	}

	if (new_option == active_idx) {
		dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
			count);
		NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
		ice_devlink_port_options_print(pf);
		return -EINVAL;
	}

	if (new_option == ICE_AQC_PORT_OPT_MAX) {
		dev_dbg(dev, "request to split: count: %u not found\n", count);
		NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
		ice_devlink_port_options_print(pf);
		return -EINVAL;
	}

	status = ice_devlink_aq_set_port_option(pf, new_option, extack);
	if (status)
		return status;

	ice_devlink_port_options_print(pf);

	return 0;
}

/**
 * ice_devlink_port_unsplit - .port_unsplit devlink handler
 * @devlink: devlink instance structure
 * @port: devlink port structure
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .port_unsplit operation.
 * Calls ice_devlink_port_split with split count set to 1.
 * There could be no FW option available with split count 1.
 *
 * Return: zero on success or an error code on failure.
 */
static int
ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
			 struct netlink_ext_ack *extack)
{
	return ice_devlink_port_split(devlink, port, 1, extack);
}

/**
 * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
 * @pf: pf struct
 *
 * This function tears down the tree exported during VF creation.
 */
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;

	devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		if (vf->devlink_port.devlink_rate)
			devl_rate_leaf_destroy(&vf->devlink_port);
	}
	mutex_unlock(&pf->vfs.table_lock);

	devl_rate_nodes_destroy(devlink);
	devl_unlock(devlink);
}

/**
 * ice_enable_custom_tx - try to enable custom Tx feature
 * @pf: pf struct
 *
 * This function tries to enable the custom Tx feature; it is not possible
 * to enable it if DCB or ADQ is active.
 */
static bool ice_enable_custom_tx(struct ice_pf *pf)
{
	struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
	struct device *dev = ice_pf_to_dev(pf);

	if (pi->is_custom_tx_enabled)
		/* already enabled, return true */
		return true;

	if (ice_is_adq_active(pf)) {
		dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
		return false;
	}

	if (ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
		return false;
	}

	pi->is_custom_tx_enabled = true;

	return true;
}

/**
 * ice_traverse_tx_tree - traverse Tx scheduler tree
 * @devlink: devlink struct
 * @node: current node, used for recursion
 * @tc_node: tc_node struct, that is treated as a root
 * @pf: pf struct
 *
 * This function traverses the Tx scheduler tree and exports the entire
 * structure to devlink-rate.
 */
static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
				 struct ice_sched_node *tc_node, struct ice_pf *pf)
{
	struct devlink_rate *rate_node = NULL;
	struct ice_vf *vf;
	int i;

	if (node->parent == tc_node) {
		/* create root node */
		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->vf) {
		vf = pf->vsi[node->vsi_handle]->vf;
		if (!vf->devlink_port.devlink_rate)
			/* leaf nodes don't have children,
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&vf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
		   node->parent->rate_node) {
		rate_node = devl_rate_node_create(devlink, node, node->name,
						  node->parent->rate_node);
	}

	if (rate_node && !IS_ERR(rate_node))
		node->rate_node = rate_node;

	for (i = 0; i < node->num_children; i++)
		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}

/**
 * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
 * @devlink: devlink struct
 * @vsi: main vsi struct
 *
 * This function finds a root node, then calls ice_traverse_tx_tree, which
 * traverses the tree and exports its contents to devlink-rate.
 */
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;
	struct ice_sched_node *tc_node;
	struct ice_pf *pf = vsi->back;
	int i;

	tc_node = pi->root->children[0];
	mutex_lock(&pi->sched_lock);
	devl_lock(devlink);
	for (i = 0; i < tc_node->num_children; i++)
		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
	devl_unlock(devlink);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

/**
 * ice_set_object_tx_share - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MIN_BW scheduling BW limit.
 */
static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
				   u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* convert bytes per second to kilobits per second:
	 * bw * 8 bits per byte / 1000 bits per kilobit == bw / 125
	 */
	node->tx_share = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");

	return status;
}

/**
 * ice_set_object_tx_max - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MAX_BW scheduling BW limit.
 */
static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
				 u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* convert bytes per second to kilobits per second (bw / 125) */
	node->tx_max = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");

	return status;
}

/**
 * ice_set_object_tx_priority - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @priority: value representing priority for strict priority arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets priority of node among siblings.
 */
static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
				      u32 priority, struct netlink_ext_ack *extack)
{
	int status;

	if (priority >= 8) {
		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_priority = priority;
	status = ice_sched_set_node_priority(pi, node, node->tx_priority);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");

	return status;
}

/**
 * ice_set_object_tx_weight - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @weight: value representing relative weight for WFQ arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets node weight for WFQ algorithm.
 */
static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
				    u32 weight, struct netlink_ext_ack *extack)
{
	int status;

	if (weight > 200 || weight < 1) {
		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_weight = weight;
	status = ice_sched_set_node_weight(pi, node, node->tx_weight);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");

	return status;
}

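/* devlink-rate callbacks. The priv pointer carried by each devlink_rate
 * object is the ice_sched_node it maps to: it is preallocated in
 * ice_devlink_rate_node_new() for user-created nodes and attached by
 * ice_traverse_tx_tree() for exported leaves. Each setter translates the
 * devlink-rate operation into a Tx scheduler update through the
 * ice_set_object_tx_* helpers above.
 */
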
/**
 * ice_get_pi_from_dev_rate - get port info from devlink_rate
 * @rate_node: devlink struct instance
 *
 * This function returns the corresponding port_info struct of devlink_rate
 */
static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
{
	struct ice_pf *pf = devlink_priv(rate_node->devlink);

	return ice_get_main_vsi(pf)->port_info;
}

static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* preallocate memory for ice_sched_node */
	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	*priv = node;

	return 0;
}

static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node, *tc_node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);
	tc_node = pi->root->children[0];
	node = priv;

	if (!rate_node->parent || !node || tc_node == node || !extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* can't delete a node that still has children */
	if (node->num_children)
		return -EINVAL;

	mutex_lock(&pi->sched_lock);
	ice_free_sched_node(pi, node);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
				     node, tx_max, extack);
}

static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
				       tx_share, extack);
}

static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
					  tx_priority, extack);
}

static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
					tx_weight, extack);
}

static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
				     node, tx_max, extack);
}

static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
				       node, tx_share, extack);
}

static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
					  node, tx_priority, extack);
}

static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
					node, tx_weight, extack);
}

static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
				  struct devlink_rate *parent,
				  void *priv, void *parent_priv,
				  struct netlink_ext_ack *extack)
{
	struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
	struct ice_sched_node *tc_node, *node, *parent_node;
	u16 num_nodes_added;
	u32 first_node_teid;
	u32 node_teid;
	int status;

	tc_node = pi->root->children[0];
	node = priv;

	if (!extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
		return -EBUSY;

	if (!parent) {
		if (!node || tc_node == node || node->num_children)
			return -EINVAL;

		mutex_lock(&pi->sched_lock);
		ice_free_sched_node(pi, node);
		mutex_unlock(&pi->sched_lock);

		return 0;
	}

	parent_node = parent_priv;

	/* if the node doesn't exist, create it */
	if (!node->parent) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_add_elems(pi, tc_node, parent_node,
					     parent_node->tx_sched_layer + 1,
					     1, &num_nodes_added, &first_node_teid,
					     &node);
		mutex_unlock(&pi->sched_lock);

		if (status) {
			NL_SET_ERR_MSG_MOD(extack, "Can't add a new node");
			return status;
		}
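
		/* apply any rate parameters already cached on the
		 * devlink_rate object to the newly created scheduler node
		 */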
		if (devlink_rate->tx_share)
			ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack);
		if (devlink_rate->tx_max)
			ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack);
		if (devlink_rate->tx_priority)
			ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack);
		if (devlink_rate->tx_weight)
			ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack);
	} else {
		node_teid = le32_to_cpu(node->info.node_teid);
		mutex_lock(&pi->sched_lock);
		status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid);
		mutex_unlock(&pi->sched_lock);

		if (status)
			NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent");
	}

	return status;
}

/**
 * ice_devlink_reload_up - do reload up after reinit
 * @devlink: pointer to the devlink instance reloading
 * @action: the action requested
 * @limit: limits imposed by userspace, such as not resetting
 * @actions_performed: on return, indicates which actions were actually performed
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_up(struct devlink *devlink,
		      enum devlink_reload_action action,
		      enum devlink_reload_limit limit,
		      u32 *actions_performed,
		      struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		return ice_load(pf);
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
		return ice_devlink_reload_empr_finish(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

static const struct devlink_ops ice_devlink_ops = {
	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down = ice_devlink_reload_down,
	.reload_up = ice_devlink_reload_up,
	.port_split = ice_devlink_port_split,
	.port_unsplit = ice_devlink_port_unsplit,
	.eswitch_mode_get = ice_eswitch_mode_get,
	.eswitch_mode_set = ice_eswitch_mode_set,
	.info_get = ice_devlink_info_get,
	.flash_update = ice_devlink_flash_update,

	.rate_node_new = ice_devlink_rate_node_new,
	.rate_node_del = ice_devlink_rate_node_del,

	.rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set,
	.rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set,
	.rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set,

	.rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
	.rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
	.rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
	.rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,

	.rate_leaf_parent_set = ice_devlink_set_parent,
	.rate_node_parent_set = ice_devlink_set_parent,
};

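/* RDMA protocol parameters. RoCEv2 and iWARP are mutually exclusive on this
 * device: each validate callback rejects enabling one protocol while the
 * other is active, and each set callback plugs or unplugs the auxiliary
 * RDMA device accordingly.
 */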
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2;

	return 0;
}

static int
ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool roce_ena = ctx->val.vbool;
	int ret;

	if (!roce_ena) {
		ice_unplug_aux_dev(pf);
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
		return 0;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;

	return ret;
}

static int
ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
		NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;

	return 0;
}

static int
ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool iw_ena = ctx->val.vbool;
	int ret;

	if (!iw_ena) {
		ice_unplug_aux_dev(pf);
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
		return 0;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;

	return ret;
}

static int
ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
			       union devlink_param_value val,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
		NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct devlink_param ice_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_roce_get,
			      ice_devlink_enable_roce_set,
			      ice_devlink_enable_roce_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_iw_get,
			      ice_devlink_enable_iw_set,
			      ice_devlink_enable_iw_validate),
};

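/**
 * ice_devlink_free - devres action used to free the devlink instance
 * @devlink_ptr: pointer to the devlink instance to free
 *
 * Registered via devm_add_action_or_reset() in ice_allocate_pf(), so the
 * devlink memory is released when the driver unwinds.
 */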
static void ice_devlink_free(void *devlink_ptr)
{
	devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is kept track of through devres by
 * adding an action to remove it when unwinding.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
	if (!devlink)
		return NULL;

	/* Add an action to teardown the devlink when unwinding the driver */
	if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
		return NULL;

	return devlink_priv(devlink);
}

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for.
 *
 * Register the devlink instance associated with this physical function.
 */
void ice_devlink_register(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devlink_register(devlink);
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
	devlink_unregister(priv_to_devlink(pf));
}

/**
 * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
 * @pf: the PF to create a devlink port for
 * @ppid: struct with switch id information
 */
static void
ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
{
	struct pci_dev *pdev = pf->pdev;
	u64 id;

	id = pci_get_dsn(pdev);

	ppid->id_len = sizeof(id);
	put_unaligned_be64(id, &ppid->id);
}

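/**
 * ice_devlink_register_params - Register devlink parameters for this PF
 * @pf: the PF to register parameters for
 *
 * Return: zero on success or an error code on failure.
 */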
int ice_devlink_register_params(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	return devlink_params_register(devlink, ice_devlink_params,
				       ARRAY_SIZE(ice_devlink_params));
}

/**
 * ice_devlink_unregister_params - Unregister devlink parameters for this PF
 * @pf: the PF to unregister parameters for
 */
void ice_devlink_unregister_params(struct ice_pf *pf)
{
	devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
				  ARRAY_SIZE(ice_devlink_params));
}

/**
 * ice_devlink_set_port_split_options - Set port split options
 * @pf: the PF to set port split options
 * @attrs: devlink attributes
 *
 * Sets devlink port split options based on available FW port options
 */
static void
ice_devlink_set_port_split_options(struct ice_pf *pf,
				   struct devlink_port_attrs *attrs)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
	u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
	bool active_valid, pending_valid;
	int status;

	status = ice_aq_get_port_options(&pf->hw, options, &option_count,
					 0, true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status) {
		dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
			status);
		return;
	}

	/* find the biggest available port split count */
	for (i = 0; i < option_count; i++)
		attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);

	attrs->splittable = attrs->lanes ? 1 : 0;
	ice_active_port_option = active_idx;
}

/**
 * ice_devlink_create_pf_port - Create a devlink port for this PF
 * @pf: the PF to create a devlink port for
 *
 * Create and register a devlink_port for this PF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_pf_port(struct ice_pf *pf)
{
	struct devlink_port_attrs attrs = {};
	struct devlink_port *devlink_port;
	struct devlink *devlink;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	devlink_port = &pf->devlink_port;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EIO;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = pf->hw.bus.func;

	/* As FW supports only port split options for the whole device,
	 * set port split options only for the first PF.
	 */
	if (pf->hw.pf_id == 0)
		ice_devlink_set_port_split_options(pf, &attrs);

	ice_devlink_set_switch_id(pf, &attrs.switch_id);

	devlink_port_attrs_set(devlink_port, &attrs);
	devlink = priv_to_devlink(pf);

	err = devlink_port_register(devlink, devlink_port, vsi->idx);
	if (err) {
		dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
			pf->hw.pf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
 * @pf: the PF to cleanup
 *
 * Unregisters the devlink_port structure associated with this PF.
 */
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
	devlink_port_unregister(&pf->devlink_port);
}

/**
 * ice_devlink_create_vf_port - Create a devlink port for this VF
 * @vf: the VF to create a port for
 *
 * Create and register a devlink_port for this VF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_vf_port(struct ice_vf *vf)
{
	struct devlink_port_attrs attrs = {};
	struct devlink_port *devlink_port;
	struct devlink *devlink;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_pf *pf;
	int err;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	devlink_port = &vf->devlink_port;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return -EINVAL;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
	attrs.pci_vf.pf = pf->hw.bus.func;
	attrs.pci_vf.vf = vf->vf_id;

	ice_devlink_set_switch_id(pf, &attrs.switch_id);

	devlink_port_attrs_set(devlink_port, &attrs);
	devlink = priv_to_devlink(pf);

	err = devlink_port_register(devlink, devlink_port, vsi->idx);
	if (err) {
		dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
 * @vf: the VF to cleanup
 *
 * Unregisters the devlink_port structure associated with this VF.
 */
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
	devl_rate_leaf_destroy(&vf->devlink_port);
	devlink_port_unregister(&vf->devlink_port);
}

#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)

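/* Forward declarations of the region ops: ice_devlink_nvm_snapshot() and
 * ice_devlink_nvm_read() serve both the nvm-flash and shadow-ram regions
 * and tell them apart by comparing the ops pointer they are invoked with.
 */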
static const struct devlink_region_ops ice_nvm_region_ops;
static const struct devlink_region_ops ice_sram_region_ops;

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
 * the nvm-flash or shadow-ram region.
 *
 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
 * interface.
 *
 * Return: zero on success (updating the data pointer), or a non-zero error
 * code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u8 *nvm_data, *tmp, i;
	u32 nvm_size, left;
	s8 num_blks;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
		return -EOPNOTSUPP;
	}

	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;

	num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
	tmp = nvm_data;
	left = nvm_size;

	/* Some systems take longer to read the NVM than others which causes the
	 * FW to reclaim the NVM lock before the entire NVM has been read. Fix
	 * this by breaking the reads of the NVM into smaller chunks that will
	 * probably not take as long. This has some overhead since we are
	 * increasing the number of AQ commands, but it should always work.
	 */
	for (i = 0; i < num_blks; i++) {
		u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);

		status = ice_acquire_nvm(hw, ICE_RES_READ);
		if (status) {
			dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
				status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
			vfree(nvm_data);
			return -EIO;
		}

		status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
					   &read_sz, tmp, read_shadow_ram);
		if (status) {
			dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
				read_sz, status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
			ice_release_nvm(hw);
			vfree(nvm_data);
			return -EIO;
		}
		ice_release_nvm(hw);

		tmp += read_sz;
		left -= read_sz;
	}

	*data = nvm_data;

	return 0;
}

/**
 * ice_devlink_nvm_read - Read a portion of NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to read
 * @extack: extended ACK response structure
 * @offset: the offset to start at
 * @size: the amount to read
 * @data: the data buffer to read into
 *
 * This function is called in response to DEVLINK_CMD_REGION_READ to directly
 * read a section of the NVM contents.
 *
 * It reads from either the nvm-flash or shadow-ram region contents.
 *
 * Return: zero on success, or a non-zero error code on failure.
 */
static int ice_devlink_nvm_read(struct devlink *devlink,
				const struct devlink_region_ops *ops,
				struct netlink_ext_ack *extack,
				u64 offset, u32 size, u8 *data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u64 nvm_size;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in read function");
		return -EOPNOTSUPP;
	}

	if (offset + size >= nvm_size) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
		return -ERANGE;
	}

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
				   read_shadow_ram);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		return -EIO;
	}
	ice_release_nvm(hw);

	return 0;
}

/**
 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for
 * the device-caps devlink region. It captures a snapshot of the device
 * capabilities reported by firmware.
 *
 * Return: zero on success (updating the data pointer), or a non-zero error
 * code on failure.
 */
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
			     const struct devlink_region_ops *ops,
			     struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	void *devcaps;
	int status;

	devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
	if (!devcaps)
		return -ENOMEM;

	status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (status) {
		dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
		vfree(devcaps);
		return status;
	}

	*data = (u8 *)devcaps;

	return 0;
}

static const struct devlink_region_ops ice_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_sram_region_ops = {
	.name = "shadow-ram",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = vfree,
	.snapshot = ice_devlink_devcaps_snapshot,
};

/**
 * ice_devlink_init_regions - Initialize devlink regions
 * @pf: the PF device structure
 *
 * Create devlink regions used to enable access to dump the contents of the
 * flash memory on the device.
 */
void ice_devlink_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	u64 nvm_size, sram_size;

	nvm_size = pf->hw.flash.flash_size;
	pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
					       nvm_size);
	if (IS_ERR(pf->nvm_region)) {
		dev_err(dev, "failed to create NVM devlink region, err %ld\n",
			PTR_ERR(pf->nvm_region));
		pf->nvm_region = NULL;
	}

	sram_size = pf->hw.flash.sr_words * 2u;
	pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
						1, sram_size);
	if (IS_ERR(pf->sram_region)) {
		dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
			PTR_ERR(pf->sram_region));
		pf->sram_region = NULL;
	}

	pf->devcaps_region = devlink_region_create(devlink,
						   &ice_devcaps_region_ops, 10,
						   ICE_AQ_MAX_BUF_LEN);
	if (IS_ERR(pf->devcaps_region)) {
		dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(pf->devcaps_region));
		pf->devcaps_region = NULL;
	}
}

/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	if (pf->nvm_region)
		devlink_region_destroy(pf->nvm_region);

	if (pf->sram_region)
		devlink_region_destroy(pf->sram_region);

	if (pf->devcaps_region)
		devlink_region_destroy(pf->devcaps_region);
}