1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2020, Intel Corporation. */ 3 4 #include <linux/vmalloc.h> 5 6 #include "ice.h" 7 #include "ice_lib.h" 8 #include "ice_devlink.h" 9 #include "ice_eswitch.h" 10 #include "ice_fw_update.h" 11 #include "ice_dcb_lib.h" 12 13 static int ice_active_port_option = -1; 14 15 /* context for devlink info version reporting */ 16 struct ice_info_ctx { 17 char buf[128]; 18 struct ice_orom_info pending_orom; 19 struct ice_nvm_info pending_nvm; 20 struct ice_netlist_info pending_netlist; 21 struct ice_hw_dev_caps dev_caps; 22 }; 23 24 /* The following functions are used to format specific strings for various 25 * devlink info versions. The ctx parameter is used to provide the storage 26 * buffer, as well as any ancillary information calculated when the info 27 * request was made. 28 * 29 * If a version does not exist, for example when attempting to get the 30 * inactive version of flash when there is no pending update, the function 31 * should leave the buffer in the ctx structure empty. 32 */ 33 34 static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) 35 { 36 u8 dsn[8]; 37 38 /* Copy the DSN into an array in Big Endian format */ 39 put_unaligned_be64(pci_get_dsn(pf->pdev), dsn); 40 41 snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn); 42 } 43 44 static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) 45 { 46 struct ice_hw *hw = &pf->hw; 47 int status; 48 49 status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); 50 if (status) 51 /* We failed to locate the PBA, so just skip this entry */ 52 dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n", 53 status); 54 } 55 56 static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) 57 { 58 struct ice_hw *hw = &pf->hw; 59 60 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", 61 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch); 62 } 63 64 static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) 65 { 66 struct ice_hw *hw = &pf->hw; 67 68 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver, 69 hw->api_min_ver, hw->api_patch); 70 } 71 72 static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) 73 { 74 struct ice_hw *hw = &pf->hw; 75 76 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build); 77 } 78 79 static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) 80 { 81 struct ice_orom_info *orom = &pf->hw.flash.orom; 82 83 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", 84 orom->major, orom->build, orom->patch); 85 } 86 87 static void 88 ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, 89 struct ice_info_ctx *ctx) 90 { 91 struct ice_orom_info *orom = &ctx->pending_orom; 92 93 if (ctx->dev_caps.common_cap.nvm_update_pending_orom) 94 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", 95 orom->major, orom->build, orom->patch); 96 } 97 98 static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) 99 { 100 struct ice_nvm_info *nvm = &pf->hw.flash.nvm; 101 102 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); 103 } 104 105 static void 106 ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, 107 struct ice_info_ctx *ctx) 108 { 109 struct ice_nvm_info *nvm = &ctx->pending_nvm; 110 111 if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) 112 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", 113 nvm->major, nvm->minor); 114 } 115 116 static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) 117 { 118 struct ice_nvm_info *nvm = &pf->hw.flash.nvm; 119 120 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); 121 } 122 123 static void 124 ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) 125 { 126 struct ice_nvm_info *nvm = &ctx->pending_nvm; 127 128 if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) 129 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); 130 } 131 132 static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) 133 { 134 struct ice_hw *hw = &pf->hw; 135 136 snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name); 137 } 138 139 static void 140 ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) 141 { 142 struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver; 143 144 snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", 145 pkg->major, pkg->minor, pkg->update, pkg->draft); 146 } 147 148 static void 149 ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) 150 { 151 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id); 152 } 153 154 static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) 155 { 156 struct ice_netlist_info *netlist = &pf->hw.flash.netlist; 157 158 /* The netlist version fields are BCD formatted */ 159 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", 160 netlist->major, netlist->minor, 161 netlist->type >> 16, netlist->type & 0xFFFF, 162 netlist->rev, netlist->cust_ver); 163 } 164 165 static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) 166 { 167 struct ice_netlist_info *netlist = &pf->hw.flash.netlist; 168 169 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); 170 } 171 172 static void 173 ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, 174 struct ice_info_ctx *ctx) 175 { 176 struct ice_netlist_info *netlist = &ctx->pending_netlist; 177 178 /* The netlist version fields are BCD formatted */ 179 if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) 180 snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", 181 netlist->major, netlist->minor, 182 netlist->type >> 16, netlist->type & 0xFFFF, 183 netlist->rev, netlist->cust_ver); 184 } 185 186 static void 187 ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, 188 struct ice_info_ctx *ctx) 189 { 190 struct ice_netlist_info *netlist = &ctx->pending_netlist; 191 192 if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) 193 snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); 194 } 195 196 #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } 197 #define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL } 198 #define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback } 199 200 /* The combined() macro inserts both the running entry as well as a stored 201 * entry. The running entry will always report the version from the active 202 * handler. The stored entry will first try the pending handler, and fallback 203 * to the active handler if the pending function does not report a version. 204 * The pending handler should check the status of a pending update for the 205 * relevant flash component. It should only fill in the buffer in the case 206 * where a valid pending version is available. This ensures that the related 207 * stored and running versions remain in sync, and that stored versions are 208 * correctly reported as expected. 209 */ 210 #define combined(key, active, pending) \ 211 running(key, active), \ 212 stored(key, pending, active) 213 214 enum ice_version_type { 215 ICE_VERSION_FIXED, 216 ICE_VERSION_RUNNING, 217 ICE_VERSION_STORED, 218 }; 219 220 static const struct ice_devlink_version { 221 enum ice_version_type type; 222 const char *key; 223 void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx); 224 void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); 225 } ice_devlink_versions[] = { 226 fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba), 227 running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt), 228 running("fw.mgmt.api", ice_info_fw_api), 229 running("fw.mgmt.build", ice_info_fw_build), 230 combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver), 231 combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver), 232 combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack), 233 running("fw.app.name", ice_info_ddp_pkg_name), 234 running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version), 235 running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id), 236 combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver), 237 combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build), 238 }; 239 240 /** 241 * ice_devlink_info_get - .info_get devlink handler 242 * @devlink: devlink instance structure 243 * @req: the devlink info request 244 * @extack: extended netdev ack structure 245 * 246 * Callback for the devlink .info_get operation. Reports information about the 247 * device. 248 * 249 * Return: zero on success or an error code on failure. 250 */ 251 static int ice_devlink_info_get(struct devlink *devlink, 252 struct devlink_info_req *req, 253 struct netlink_ext_ack *extack) 254 { 255 struct ice_pf *pf = devlink_priv(devlink); 256 struct device *dev = ice_pf_to_dev(pf); 257 struct ice_hw *hw = &pf->hw; 258 struct ice_info_ctx *ctx; 259 size_t i; 260 int err; 261 262 err = ice_wait_for_reset(pf, 10 * HZ); 263 if (err) { 264 NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting"); 265 return err; 266 } 267 268 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 269 if (!ctx) 270 return -ENOMEM; 271 272 /* discover capabilities first */ 273 err = ice_discover_dev_caps(hw, &ctx->dev_caps); 274 if (err) { 275 dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n", 276 err, ice_aq_str(hw->adminq.sq_last_status)); 277 NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities"); 278 goto out_free_ctx; 279 } 280 281 if (ctx->dev_caps.common_cap.nvm_update_pending_orom) { 282 err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); 283 if (err) { 284 dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n", 285 err, ice_aq_str(hw->adminq.sq_last_status)); 286 287 /* disable display of pending Option ROM */ 288 ctx->dev_caps.common_cap.nvm_update_pending_orom = false; 289 } 290 } 291 292 if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) { 293 err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); 294 if (err) { 295 dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n", 296 err, ice_aq_str(hw->adminq.sq_last_status)); 297 298 /* disable display of pending Option ROM */ 299 ctx->dev_caps.common_cap.nvm_update_pending_nvm = false; 300 } 301 } 302 303 if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) { 304 err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); 305 if (err) { 306 dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n", 307 err, ice_aq_str(hw->adminq.sq_last_status)); 308 309 /* disable display of pending Option ROM */ 310 ctx->dev_caps.common_cap.nvm_update_pending_netlist = false; 311 } 312 } 313 314 ice_info_get_dsn(pf, ctx); 315 316 err = devlink_info_serial_number_put(req, ctx->buf); 317 if (err) { 318 NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number"); 319 goto out_free_ctx; 320 } 321 322 for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) { 323 enum ice_version_type type = ice_devlink_versions[i].type; 324 const char *key = ice_devlink_versions[i].key; 325 326 memset(ctx->buf, 0, sizeof(ctx->buf)); 327 328 ice_devlink_versions[i].getter(pf, ctx); 329 330 /* If the default getter doesn't report a version, use the 331 * fallback function. This is primarily useful in the case of 332 * "stored" versions that want to report the same value as the 333 * running version in the normal case of no pending update. 334 */ 335 if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) 336 ice_devlink_versions[i].fallback(pf, ctx); 337 338 /* Do not report missing versions */ 339 if (ctx->buf[0] == '\0') 340 continue; 341 342 switch (type) { 343 case ICE_VERSION_FIXED: 344 err = devlink_info_version_fixed_put(req, key, ctx->buf); 345 if (err) { 346 NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version"); 347 goto out_free_ctx; 348 } 349 break; 350 case ICE_VERSION_RUNNING: 351 err = devlink_info_version_running_put(req, key, ctx->buf); 352 if (err) { 353 NL_SET_ERR_MSG_MOD(extack, "Unable to set running version"); 354 goto out_free_ctx; 355 } 356 break; 357 case ICE_VERSION_STORED: 358 err = devlink_info_version_stored_put(req, key, ctx->buf); 359 if (err) { 360 NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version"); 361 goto out_free_ctx; 362 } 363 break; 364 } 365 } 366 367 out_free_ctx: 368 kfree(ctx); 369 return err; 370 } 371 372 /** 373 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware 374 * @pf: pointer to the pf instance 375 * @extack: netlink extended ACK structure 376 * 377 * Allow user to activate new Embedded Management Processor firmware by 378 * issuing device specific EMP reset. Called in response to 379 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE. 380 * 381 * Note that teardown and rebuild of the driver state happens automatically as 382 * part of an interrupt and watchdog task. This is because all physical 383 * functions on the device must be able to reset when an EMP reset occurs from 384 * any source. 385 */ 386 static int 387 ice_devlink_reload_empr_start(struct ice_pf *pf, 388 struct netlink_ext_ack *extack) 389 { 390 struct device *dev = ice_pf_to_dev(pf); 391 struct ice_hw *hw = &pf->hw; 392 u8 pending; 393 int err; 394 395 err = ice_get_pending_updates(pf, &pending, extack); 396 if (err) 397 return err; 398 399 /* pending is a bitmask of which flash banks have a pending update, 400 * including the main NVM bank, the Option ROM bank, and the netlist 401 * bank. If any of these bits are set, then there is a pending update 402 * waiting to be activated. 403 */ 404 if (!pending) { 405 NL_SET_ERR_MSG_MOD(extack, "No pending firmware update"); 406 return -ECANCELED; 407 } 408 409 if (pf->fw_emp_reset_disabled) { 410 NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed"); 411 return -ECANCELED; 412 } 413 414 dev_dbg(dev, "Issuing device EMP reset to activate firmware\n"); 415 416 err = ice_aq_nvm_update_empr(hw); 417 if (err) { 418 dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n", 419 err, ice_aq_str(hw->adminq.sq_last_status)); 420 NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware"); 421 return err; 422 } 423 424 return 0; 425 } 426 427 /** 428 * ice_devlink_reload_down - prepare for reload 429 * @devlink: pointer to the devlink instance to reload 430 * @netns_change: if true, the network namespace is changing 431 * @action: the action to perform 432 * @limit: limits on what reload should do, such as not resetting 433 * @extack: netlink extended ACK structure 434 */ 435 static int 436 ice_devlink_reload_down(struct devlink *devlink, bool netns_change, 437 enum devlink_reload_action action, 438 enum devlink_reload_limit limit, 439 struct netlink_ext_ack *extack) 440 { 441 struct ice_pf *pf = devlink_priv(devlink); 442 443 switch (action) { 444 case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: 445 if (ice_is_eswitch_mode_switchdev(pf)) { 446 NL_SET_ERR_MSG_MOD(extack, 447 "Go to legacy mode before doing reinit\n"); 448 return -EOPNOTSUPP; 449 } 450 if (ice_is_adq_active(pf)) { 451 NL_SET_ERR_MSG_MOD(extack, 452 "Turn off ADQ before doing reinit\n"); 453 return -EOPNOTSUPP; 454 } 455 if (ice_has_vfs(pf)) { 456 NL_SET_ERR_MSG_MOD(extack, 457 "Remove all VFs before doing reinit\n"); 458 return -EOPNOTSUPP; 459 } 460 ice_unload(pf); 461 return 0; 462 case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: 463 return ice_devlink_reload_empr_start(pf, extack); 464 default: 465 WARN_ON(1); 466 return -EOPNOTSUPP; 467 } 468 } 469 470 /** 471 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish 472 * @pf: pointer to the pf instance 473 * @extack: netlink extended ACK structure 474 * 475 * Wait for driver to finish rebuilding after EMP reset is completed. This 476 * includes time to wait for both the actual device reset as well as the time 477 * for the driver's rebuild to complete. 478 */ 479 static int 480 ice_devlink_reload_empr_finish(struct ice_pf *pf, 481 struct netlink_ext_ack *extack) 482 { 483 int err; 484 485 err = ice_wait_for_reset(pf, 60 * HZ); 486 if (err) { 487 NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute"); 488 return err; 489 } 490 491 return 0; 492 } 493 494 /** 495 * ice_devlink_port_opt_speed_str - convert speed to a string 496 * @speed: speed value 497 */ 498 static const char *ice_devlink_port_opt_speed_str(u8 speed) 499 { 500 switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) { 501 case ICE_AQC_PORT_OPT_MAX_LANE_100M: 502 return "0.1"; 503 case ICE_AQC_PORT_OPT_MAX_LANE_1G: 504 return "1"; 505 case ICE_AQC_PORT_OPT_MAX_LANE_2500M: 506 return "2.5"; 507 case ICE_AQC_PORT_OPT_MAX_LANE_5G: 508 return "5"; 509 case ICE_AQC_PORT_OPT_MAX_LANE_10G: 510 return "10"; 511 case ICE_AQC_PORT_OPT_MAX_LANE_25G: 512 return "25"; 513 case ICE_AQC_PORT_OPT_MAX_LANE_50G: 514 return "50"; 515 case ICE_AQC_PORT_OPT_MAX_LANE_100G: 516 return "100"; 517 } 518 519 return "-"; 520 } 521 522 #define ICE_PORT_OPT_DESC_LEN 50 523 /** 524 * ice_devlink_port_options_print - Print available port split options 525 * @pf: the PF to print split port options 526 * 527 * Prints a table with available port split options and max port speeds 528 */ 529 static void ice_devlink_port_options_print(struct ice_pf *pf) 530 { 531 u8 i, j, options_count, cnt, speed, pending_idx, active_idx; 532 struct ice_aqc_get_port_options_elem *options, *opt; 533 struct device *dev = ice_pf_to_dev(pf); 534 bool active_valid, pending_valid; 535 char desc[ICE_PORT_OPT_DESC_LEN]; 536 const char *str; 537 int status; 538 539 options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, 540 sizeof(*options), GFP_KERNEL); 541 if (!options) 542 return; 543 544 for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) { 545 opt = options + i * ICE_AQC_PORT_OPT_MAX; 546 options_count = ICE_AQC_PORT_OPT_MAX; 547 active_valid = 0; 548 549 status = ice_aq_get_port_options(&pf->hw, opt, &options_count, 550 i, true, &active_idx, 551 &active_valid, &pending_idx, 552 &pending_valid); 553 if (status) { 554 dev_dbg(dev, "Couldn't read port option for port %d, err %d\n", 555 i, status); 556 goto err; 557 } 558 } 559 560 dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n"); 561 dev_dbg(dev, "Status Split Quad 0 Quad 1\n"); 562 dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n"); 563 564 for (i = 0; i < options_count; i++) { 565 cnt = 0; 566 567 if (i == ice_active_port_option) 568 str = "Active"; 569 else if ((i == pending_idx) && pending_valid) 570 str = "Pending"; 571 else 572 str = ""; 573 574 cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, 575 "%-8s", str); 576 577 cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, 578 "%-6u", options[i].pmd); 579 580 for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) { 581 speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed; 582 str = ice_devlink_port_opt_speed_str(speed); 583 cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, 584 "%3s ", str); 585 } 586 587 dev_dbg(dev, "%s\n", desc); 588 } 589 590 err: 591 kfree(options); 592 } 593 594 /** 595 * ice_devlink_aq_set_port_option - Send set port option admin queue command 596 * @pf: the PF to print split port options 597 * @option_idx: selected port option 598 * @extack: extended netdev ack structure 599 * 600 * Sends set port option admin queue command with selected port option and 601 * calls NVM write activate. 602 */ 603 static int 604 ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx, 605 struct netlink_ext_ack *extack) 606 { 607 struct device *dev = ice_pf_to_dev(pf); 608 int status; 609 610 status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx); 611 if (status) { 612 dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n", 613 status, pf->hw.adminq.sq_last_status); 614 NL_SET_ERR_MSG_MOD(extack, "Port split request failed"); 615 return -EIO; 616 } 617 618 status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE); 619 if (status) { 620 dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", 621 status, pf->hw.adminq.sq_last_status); 622 NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); 623 return -EIO; 624 } 625 626 status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL); 627 if (status) { 628 dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n", 629 status, pf->hw.adminq.sq_last_status); 630 NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data"); 631 ice_release_nvm(&pf->hw); 632 return -EIO; 633 } 634 635 ice_release_nvm(&pf->hw); 636 637 NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split"); 638 return 0; 639 } 640 641 /** 642 * ice_devlink_port_split - .port_split devlink handler 643 * @devlink: devlink instance structure 644 * @port: devlink port structure 645 * @count: number of ports to split to 646 * @extack: extended netdev ack structure 647 * 648 * Callback for the devlink .port_split operation. 649 * 650 * Unfortunately, the devlink expression of available options is limited 651 * to just a number, so search for an FW port option which supports 652 * the specified number. As there could be multiple FW port options with 653 * the same port split count, allow switching between them. When the same 654 * port split count request is issued again, switch to the next FW port 655 * option with the same port split count. 656 * 657 * Return: zero on success or an error code on failure. 658 */ 659 static int 660 ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port, 661 unsigned int count, struct netlink_ext_ack *extack) 662 { 663 struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; 664 u8 i, j, active_idx, pending_idx, new_option; 665 struct ice_pf *pf = devlink_priv(devlink); 666 u8 option_count = ICE_AQC_PORT_OPT_MAX; 667 struct device *dev = ice_pf_to_dev(pf); 668 bool active_valid, pending_valid; 669 int status; 670 671 status = ice_aq_get_port_options(&pf->hw, options, &option_count, 672 0, true, &active_idx, &active_valid, 673 &pending_idx, &pending_valid); 674 if (status) { 675 dev_dbg(dev, "Couldn't read port split options, err = %d\n", 676 status); 677 NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options"); 678 return -EIO; 679 } 680 681 new_option = ICE_AQC_PORT_OPT_MAX; 682 active_idx = pending_valid ? pending_idx : active_idx; 683 for (i = 1; i <= option_count; i++) { 684 /* In order to allow switching between FW port options with 685 * the same port split count, search for a new option starting 686 * from the active/pending option (with array wrap around). 687 */ 688 j = (active_idx + i) % option_count; 689 690 if (count == options[j].pmd) { 691 new_option = j; 692 break; 693 } 694 } 695 696 if (new_option == active_idx) { 697 dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n", 698 count); 699 NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set"); 700 ice_devlink_port_options_print(pf); 701 return -EINVAL; 702 } 703 704 if (new_option == ICE_AQC_PORT_OPT_MAX) { 705 dev_dbg(dev, "request to split: count: %u not found\n", count); 706 NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config"); 707 ice_devlink_port_options_print(pf); 708 return -EINVAL; 709 } 710 711 status = ice_devlink_aq_set_port_option(pf, new_option, extack); 712 if (status) 713 return status; 714 715 ice_devlink_port_options_print(pf); 716 717 return 0; 718 } 719 720 /** 721 * ice_devlink_port_unsplit - .port_unsplit devlink handler 722 * @devlink: devlink instance structure 723 * @port: devlink port structure 724 * @extack: extended netdev ack structure 725 * 726 * Callback for the devlink .port_unsplit operation. 727 * Calls ice_devlink_port_split with split count set to 1. 728 * There could be no FW option available with split count 1. 729 * 730 * Return: zero on success or an error code on failure. 731 */ 732 static int 733 ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port, 734 struct netlink_ext_ack *extack) 735 { 736 return ice_devlink_port_split(devlink, port, 1, extack); 737 } 738 739 /** 740 * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree 741 * @pf: pf struct 742 * 743 * This function tears down tree exported during VF's creation. 744 */ 745 void ice_tear_down_devlink_rate_tree(struct ice_pf *pf) 746 { 747 struct devlink *devlink; 748 struct ice_vf *vf; 749 unsigned int bkt; 750 751 devlink = priv_to_devlink(pf); 752 753 devl_lock(devlink); 754 mutex_lock(&pf->vfs.table_lock); 755 ice_for_each_vf(pf, bkt, vf) { 756 if (vf->devlink_port.devlink_rate) 757 devl_rate_leaf_destroy(&vf->devlink_port); 758 } 759 mutex_unlock(&pf->vfs.table_lock); 760 761 devl_rate_nodes_destroy(devlink); 762 devl_unlock(devlink); 763 } 764 765 /** 766 * ice_enable_custom_tx - try to enable custom Tx feature 767 * @pf: pf struct 768 * 769 * This function tries to enable custom Tx feature, 770 * it's not possible to enable it, if DCB or ADQ is active. 771 */ 772 static bool ice_enable_custom_tx(struct ice_pf *pf) 773 { 774 struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info; 775 struct device *dev = ice_pf_to_dev(pf); 776 777 if (pi->is_custom_tx_enabled) 778 /* already enabled, return true */ 779 return true; 780 781 if (ice_is_adq_active(pf)) { 782 dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n"); 783 return false; 784 } 785 786 if (ice_is_dcb_active(pf)) { 787 dev_err(dev, "DCB active, can't modify Tx scheduler tree\n"); 788 return false; 789 } 790 791 pi->is_custom_tx_enabled = true; 792 793 return true; 794 } 795 796 /** 797 * ice_traverse_tx_tree - traverse Tx scheduler tree 798 * @devlink: devlink struct 799 * @node: current node, used for recursion 800 * @tc_node: tc_node struct, that is treated as a root 801 * @pf: pf struct 802 * 803 * This function traverses Tx scheduler tree and exports 804 * entire structure to the devlink-rate. 805 */ 806 static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node, 807 struct ice_sched_node *tc_node, struct ice_pf *pf) 808 { 809 struct devlink_rate *rate_node = NULL; 810 struct ice_vf *vf; 811 int i; 812 813 if (node->parent == tc_node) { 814 /* create root node */ 815 rate_node = devl_rate_node_create(devlink, node, node->name, NULL); 816 } else if (node->vsi_handle && 817 pf->vsi[node->vsi_handle]->vf) { 818 vf = pf->vsi[node->vsi_handle]->vf; 819 if (!vf->devlink_port.devlink_rate) 820 /* leaf nodes doesn't have children 821 * so we don't set rate_node 822 */ 823 devl_rate_leaf_create(&vf->devlink_port, node, 824 node->parent->rate_node); 825 } else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF && 826 node->parent->rate_node) { 827 rate_node = devl_rate_node_create(devlink, node, node->name, 828 node->parent->rate_node); 829 } 830 831 if (rate_node && !IS_ERR(rate_node)) 832 node->rate_node = rate_node; 833 834 for (i = 0; i < node->num_children; i++) 835 ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf); 836 } 837 838 /** 839 * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate 840 * @devlink: devlink struct 841 * @vsi: main vsi struct 842 * 843 * This function finds a root node, then calls ice_traverse_tx tree, which 844 * traverses the tree and exports it's contents to devlink rate. 845 */ 846 int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi) 847 { 848 struct ice_port_info *pi = vsi->port_info; 849 struct ice_sched_node *tc_node; 850 struct ice_pf *pf = vsi->back; 851 int i; 852 853 tc_node = pi->root->children[0]; 854 mutex_lock(&pi->sched_lock); 855 devl_lock(devlink); 856 for (i = 0; i < tc_node->num_children; i++) 857 ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf); 858 devl_unlock(devlink); 859 mutex_unlock(&pi->sched_lock); 860 861 return 0; 862 } 863 864 /** 865 * ice_set_object_tx_share - sets node scheduling parameter 866 * @pi: devlink struct instance 867 * @node: node struct instance 868 * @bw: bandwidth in bytes per second 869 * @extack: extended netdev ack structure 870 * 871 * This function sets ICE_MIN_BW scheduling BW limit. 872 */ 873 static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node, 874 u64 bw, struct netlink_ext_ack *extack) 875 { 876 int status; 877 878 mutex_lock(&pi->sched_lock); 879 /* converts bytes per second to kilo bits per second */ 880 node->tx_share = div_u64(bw, 125); 881 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share); 882 mutex_unlock(&pi->sched_lock); 883 884 if (status) 885 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share"); 886 887 return status; 888 } 889 890 /** 891 * ice_set_object_tx_max - sets node scheduling parameter 892 * @pi: devlink struct instance 893 * @node: node struct instance 894 * @bw: bandwidth in bytes per second 895 * @extack: extended netdev ack structure 896 * 897 * This function sets ICE_MAX_BW scheduling BW limit. 898 */ 899 static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node, 900 u64 bw, struct netlink_ext_ack *extack) 901 { 902 int status; 903 904 mutex_lock(&pi->sched_lock); 905 /* converts bytes per second value to kilo bits per second */ 906 node->tx_max = div_u64(bw, 125); 907 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max); 908 mutex_unlock(&pi->sched_lock); 909 910 if (status) 911 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max"); 912 913 return status; 914 } 915 916 /** 917 * ice_set_object_tx_priority - sets node scheduling parameter 918 * @pi: devlink struct instance 919 * @node: node struct instance 920 * @priority: value representing priority for strict priority arbitration 921 * @extack: extended netdev ack structure 922 * 923 * This function sets priority of node among siblings. 924 */ 925 static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node, 926 u32 priority, struct netlink_ext_ack *extack) 927 { 928 int status; 929 930 if (priority >= 8) { 931 NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8"); 932 return -EINVAL; 933 } 934 935 mutex_lock(&pi->sched_lock); 936 node->tx_priority = priority; 937 status = ice_sched_set_node_priority(pi, node, node->tx_priority); 938 mutex_unlock(&pi->sched_lock); 939 940 if (status) 941 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority"); 942 943 return status; 944 } 945 946 /** 947 * ice_set_object_tx_weight - sets node scheduling parameter 948 * @pi: devlink struct instance 949 * @node: node struct instance 950 * @weight: value represeting relative weight for WFQ arbitration 951 * @extack: extended netdev ack structure 952 * 953 * This function sets node weight for WFQ algorithm. 954 */ 955 static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node, 956 u32 weight, struct netlink_ext_ack *extack) 957 { 958 int status; 959 960 if (weight > 200 || weight < 1) { 961 NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200"); 962 return -EINVAL; 963 } 964 965 mutex_lock(&pi->sched_lock); 966 node->tx_weight = weight; 967 status = ice_sched_set_node_weight(pi, node, node->tx_weight); 968 mutex_unlock(&pi->sched_lock); 969 970 if (status) 971 NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight"); 972 973 return status; 974 } 975 976 /** 977 * ice_get_pi_from_dev_rate - get port info from devlink_rate 978 * @rate_node: devlink struct instance 979 * 980 * This function returns corresponding port_info struct of devlink_rate 981 */ 982 static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node) 983 { 984 struct ice_pf *pf = devlink_priv(rate_node->devlink); 985 986 return ice_get_main_vsi(pf)->port_info; 987 } 988 989 static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, 990 struct netlink_ext_ack *extack) 991 { 992 struct ice_sched_node *node; 993 struct ice_port_info *pi; 994 995 pi = ice_get_pi_from_dev_rate(rate_node); 996 997 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) 998 return -EBUSY; 999 1000 /* preallocate memory for ice_sched_node */ 1001 node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL); 1002 *priv = node; 1003 1004 return 0; 1005 } 1006 1007 static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, 1008 struct netlink_ext_ack *extack) 1009 { 1010 struct ice_sched_node *node, *tc_node; 1011 struct ice_port_info *pi; 1012 1013 pi = ice_get_pi_from_dev_rate(rate_node); 1014 tc_node = pi->root->children[0]; 1015 node = priv; 1016 1017 if (!rate_node->parent || !node || tc_node == node || !extack) 1018 return 0; 1019 1020 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) 1021 return -EBUSY; 1022 1023 /* can't allow to delete a node with children */ 1024 if (node->num_children) 1025 return -EINVAL; 1026 1027 mutex_lock(&pi->sched_lock); 1028 ice_free_sched_node(pi, node); 1029 mutex_unlock(&pi->sched_lock); 1030 1031 return 0; 1032 } 1033 1034 static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv, 1035 u64 tx_max, struct netlink_ext_ack *extack) 1036 { 1037 struct ice_sched_node *node = priv; 1038 1039 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) 1040 return -EBUSY; 1041 1042 if (!node) 1043 return 0; 1044 1045 return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf), 1046 node, tx_max, extack); 1047 } 1048 1049 static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, 1050 u64 tx_share, struct netlink_ext_ack *extack) 1051 { 1052 struct ice_sched_node *node = priv; 1053 1054 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) 1055 return -EBUSY; 1056 1057 if (!node) 1058 return 0; 1059 1060 return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node, 1061 tx_share, extack); 1062 } 1063 1064 static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv, 1065 u32 tx_priority, struct netlink_ext_ack *extack) 1066 { 1067 struct ice_sched_node *node = priv; 1068 1069 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) 1070 return -EBUSY; 1071 1072 if (!node) 1073 return 0; 1074 1075 return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node, 1076 tx_priority, extack); 1077 } 1078 1079 static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv, 1080 u32 tx_weight, struct netlink_ext_ack *extack) 1081 { 1082 struct ice_sched_node *node = priv; 1083 1084 if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) 1085 return -EBUSY; 1086 1087 if (!node) 1088 return 0; 1089 1090 return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node, 1091 tx_weight, extack); 1092 } 1093 1094 static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv, 1095 u64 tx_max, struct netlink_ext_ack *extack) 1096 { 1097 struct ice_sched_node *node = priv; 1098 1099 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) 1100 return -EBUSY; 1101 1102 if (!node) 1103 return 0; 1104 1105 return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node), 1106 node, tx_max, extack); 1107 } 1108 1109 static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv, 1110 u64 tx_share, struct netlink_ext_ack *extack) 1111 { 1112 struct ice_sched_node *node = priv; 1113 1114 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) 1115 return -EBUSY; 1116 1117 if (!node) 1118 return 0; 1119 1120 return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node), 1121 node, tx_share, extack); 1122 } 1123 1124 static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv, 1125 u32 tx_priority, struct netlink_ext_ack *extack) 1126 { 1127 struct ice_sched_node *node = priv; 1128 1129 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) 1130 return -EBUSY; 1131 1132 if (!node) 1133 return 0; 1134 1135 return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node), 1136 node, tx_priority, extack); 1137 } 1138 1139 static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv, 1140 u32 tx_weight, struct netlink_ext_ack *extack) 1141 { 1142 struct ice_sched_node *node = priv; 1143 1144 if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) 1145 return -EBUSY; 1146 1147 if (!node) 1148 return 0; 1149 1150 return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node), 1151 node, tx_weight, extack); 1152 } 1153 1154 static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, 1155 struct devlink_rate *parent, 1156 void *priv, void *parent_priv, 1157 struct netlink_ext_ack *extack) 1158 { 1159 struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate); 1160 struct ice_sched_node *tc_node, *node, *parent_node; 1161 u16 num_nodes_added; 1162 u32 first_node_teid; 1163 u32 node_teid; 1164 int status; 1165 1166 tc_node = pi->root->children[0]; 1167 node = priv; 1168 1169 if (!extack) 1170 return 0; 1171 1172 if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink))) 1173 return -EBUSY; 1174 1175 if (!parent) { 1176 if (!node || tc_node == node || node->num_children) 1177 return -EINVAL; 1178 1179 mutex_lock(&pi->sched_lock); 1180 ice_free_sched_node(pi, node); 1181 mutex_unlock(&pi->sched_lock); 1182 1183 return 0; 1184 } 1185 1186 parent_node = parent_priv; 1187 1188 /* if the node doesn't exist, create it */ 1189 if (!node->parent) { 1190 mutex_lock(&pi->sched_lock); 1191 status = ice_sched_add_elems(pi, tc_node, parent_node, 1192 parent_node->tx_sched_layer + 1, 1193 1, &num_nodes_added, &first_node_teid, 1194 &node); 1195 mutex_unlock(&pi->sched_lock); 1196 1197 if (status) { 1198 NL_SET_ERR_MSG_MOD(extack, "Can't add a new node"); 1199 return status; 1200 } 1201 1202 if (devlink_rate->tx_share) 1203 ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack); 1204 if (devlink_rate->tx_max) 1205 ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack); 1206 if (devlink_rate->tx_priority) 1207 ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack); 1208 if (devlink_rate->tx_weight) 1209 ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack); 1210 } else { 1211 node_teid = le32_to_cpu(node->info.node_teid); 1212 mutex_lock(&pi->sched_lock); 1213 status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid); 1214 mutex_unlock(&pi->sched_lock); 1215 1216 if (status) 1217 NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent"); 1218 } 1219 1220 return status; 1221 } 1222 1223 /** 1224 * ice_devlink_reload_up - do reload up after reinit 1225 * @devlink: pointer to the devlink instance reloading 1226 * @action: the action requested 1227 * @limit: limits imposed by userspace, such as not resetting 1228 * @actions_performed: on return, indicate what actions actually performed 1229 * @extack: netlink extended ACK structure 1230 */ 1231 static int 1232 ice_devlink_reload_up(struct devlink *devlink, 1233 enum devlink_reload_action action, 1234 enum devlink_reload_limit limit, 1235 u32 *actions_performed, 1236 struct netlink_ext_ack *extack) 1237 { 1238 struct ice_pf *pf = devlink_priv(devlink); 1239 1240 switch (action) { 1241 case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: 1242 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); 1243 return ice_load(pf); 1244 case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: 1245 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); 1246 return ice_devlink_reload_empr_finish(pf, extack); 1247 default: 1248 WARN_ON(1); 1249 return -EOPNOTSUPP; 1250 } 1251 } 1252 1253 static const struct devlink_ops ice_devlink_ops = { 1254 .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, 1255 .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | 1256 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), 1257 /* The ice driver currently does not support driver reinit */ 1258 .reload_down = ice_devlink_reload_down, 1259 .reload_up = ice_devlink_reload_up, 1260 .port_split = ice_devlink_port_split, 1261 .port_unsplit = ice_devlink_port_unsplit, 1262 .eswitch_mode_get = ice_eswitch_mode_get, 1263 .eswitch_mode_set = ice_eswitch_mode_set, 1264 .info_get = ice_devlink_info_get, 1265 .flash_update = ice_devlink_flash_update, 1266 1267 .rate_node_new = ice_devlink_rate_node_new, 1268 .rate_node_del = ice_devlink_rate_node_del, 1269 1270 .rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set, 1271 .rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set, 1272 .rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set, 1273 .rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set, 1274 1275 .rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set, 1276 .rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set, 1277 .rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set, 1278 .rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set, 1279 1280 .rate_leaf_parent_set = ice_devlink_set_parent, 1281 .rate_node_parent_set = ice_devlink_set_parent, 1282 }; 1283 1284 static int 1285 ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, 1286 struct devlink_param_gset_ctx *ctx) 1287 { 1288 struct ice_pf *pf = devlink_priv(devlink); 1289 1290 ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false; 1291 1292 return 0; 1293 } 1294 1295 static int 1296 ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, 1297 struct devlink_param_gset_ctx *ctx) 1298 { 1299 struct ice_pf *pf = devlink_priv(devlink); 1300 bool roce_ena = ctx->val.vbool; 1301 int ret; 1302 1303 if (!roce_ena) { 1304 ice_unplug_aux_dev(pf); 1305 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1306 return 0; 1307 } 1308 1309 pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; 1310 ret = ice_plug_aux_dev(pf); 1311 if (ret) 1312 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1313 1314 return ret; 1315 } 1316 1317 static int 1318 ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id, 1319 union devlink_param_value val, 1320 struct netlink_ext_ack *extack) 1321 { 1322 struct ice_pf *pf = devlink_priv(devlink); 1323 1324 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 1325 return -EOPNOTSUPP; 1326 1327 if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) { 1328 NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); 1329 return -EOPNOTSUPP; 1330 } 1331 1332 return 0; 1333 } 1334 1335 static int 1336 ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, 1337 struct devlink_param_gset_ctx *ctx) 1338 { 1339 struct ice_pf *pf = devlink_priv(devlink); 1340 1341 ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP; 1342 1343 return 0; 1344 } 1345 1346 static int 1347 ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, 1348 struct devlink_param_gset_ctx *ctx) 1349 { 1350 struct ice_pf *pf = devlink_priv(devlink); 1351 bool iw_ena = ctx->val.vbool; 1352 int ret; 1353 1354 if (!iw_ena) { 1355 ice_unplug_aux_dev(pf); 1356 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; 1357 return 0; 1358 } 1359 1360 pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP; 1361 ret = ice_plug_aux_dev(pf); 1362 if (ret) 1363 pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; 1364 1365 return ret; 1366 } 1367 1368 static int 1369 ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, 1370 union devlink_param_value val, 1371 struct netlink_ext_ack *extack) 1372 { 1373 struct ice_pf *pf = devlink_priv(devlink); 1374 1375 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 1376 return -EOPNOTSUPP; 1377 1378 if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) { 1379 NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); 1380 return -EOPNOTSUPP; 1381 } 1382 1383 return 0; 1384 } 1385 1386 static const struct devlink_param ice_devlink_params[] = { 1387 DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1388 ice_devlink_enable_roce_get, 1389 ice_devlink_enable_roce_set, 1390 ice_devlink_enable_roce_validate), 1391 DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1392 ice_devlink_enable_iw_get, 1393 ice_devlink_enable_iw_set, 1394 ice_devlink_enable_iw_validate), 1395 1396 }; 1397 1398 static void ice_devlink_free(void *devlink_ptr) 1399 { 1400 devlink_free((struct devlink *)devlink_ptr); 1401 } 1402 1403 /** 1404 * ice_allocate_pf - Allocate devlink and return PF structure pointer 1405 * @dev: the device to allocate for 1406 * 1407 * Allocate a devlink instance for this device and return the private area as 1408 * the PF structure. The devlink memory is kept track of through devres by 1409 * adding an action to remove it when unwinding. 1410 */ 1411 struct ice_pf *ice_allocate_pf(struct device *dev) 1412 { 1413 struct devlink *devlink; 1414 1415 devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); 1416 if (!devlink) 1417 return NULL; 1418 1419 /* Add an action to teardown the devlink when unwinding the driver */ 1420 if (devm_add_action_or_reset(dev, ice_devlink_free, devlink)) 1421 return NULL; 1422 1423 return devlink_priv(devlink); 1424 } 1425 1426 /** 1427 * ice_devlink_register - Register devlink interface for this PF 1428 * @pf: the PF to register the devlink for. 1429 * 1430 * Register the devlink instance associated with this physical function. 1431 * 1432 * Return: zero on success or an error code on failure. 1433 */ 1434 void ice_devlink_register(struct ice_pf *pf) 1435 { 1436 struct devlink *devlink = priv_to_devlink(pf); 1437 1438 devlink_register(devlink); 1439 } 1440 1441 /** 1442 * ice_devlink_unregister - Unregister devlink resources for this PF. 1443 * @pf: the PF structure to cleanup 1444 * 1445 * Releases resources used by devlink and cleans up associated memory. 1446 */ 1447 void ice_devlink_unregister(struct ice_pf *pf) 1448 { 1449 devlink_unregister(priv_to_devlink(pf)); 1450 } 1451 1452 /** 1453 * ice_devlink_set_switch_id - Set unique switch id based on pci dsn 1454 * @pf: the PF to create a devlink port for 1455 * @ppid: struct with switch id information 1456 */ 1457 static void 1458 ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) 1459 { 1460 struct pci_dev *pdev = pf->pdev; 1461 u64 id; 1462 1463 id = pci_get_dsn(pdev); 1464 1465 ppid->id_len = sizeof(id); 1466 put_unaligned_be64(id, &ppid->id); 1467 } 1468 1469 int ice_devlink_register_params(struct ice_pf *pf) 1470 { 1471 struct devlink *devlink = priv_to_devlink(pf); 1472 1473 return devlink_params_register(devlink, ice_devlink_params, 1474 ARRAY_SIZE(ice_devlink_params)); 1475 } 1476 1477 void ice_devlink_unregister_params(struct ice_pf *pf) 1478 { 1479 devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params, 1480 ARRAY_SIZE(ice_devlink_params)); 1481 } 1482 1483 /** 1484 * ice_devlink_set_port_split_options - Set port split options 1485 * @pf: the PF to set port split options 1486 * @attrs: devlink attributes 1487 * 1488 * Sets devlink port split options based on available FW port options 1489 */ 1490 static void 1491 ice_devlink_set_port_split_options(struct ice_pf *pf, 1492 struct devlink_port_attrs *attrs) 1493 { 1494 struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; 1495 u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX; 1496 bool active_valid, pending_valid; 1497 int status; 1498 1499 status = ice_aq_get_port_options(&pf->hw, options, &option_count, 1500 0, true, &active_idx, &active_valid, 1501 &pending_idx, &pending_valid); 1502 if (status) { 1503 dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n", 1504 status); 1505 return; 1506 } 1507 1508 /* find the biggest available port split count */ 1509 for (i = 0; i < option_count; i++) 1510 attrs->lanes = max_t(int, attrs->lanes, options[i].pmd); 1511 1512 attrs->splittable = attrs->lanes ? 1 : 0; 1513 ice_active_port_option = active_idx; 1514 } 1515 1516 /** 1517 * ice_devlink_create_pf_port - Create a devlink port for this PF 1518 * @pf: the PF to create a devlink port for 1519 * 1520 * Create and register a devlink_port for this PF. 1521 * 1522 * Return: zero on success or an error code on failure. 1523 */ 1524 int ice_devlink_create_pf_port(struct ice_pf *pf) 1525 { 1526 struct devlink_port_attrs attrs = {}; 1527 struct devlink_port *devlink_port; 1528 struct devlink *devlink; 1529 struct ice_vsi *vsi; 1530 struct device *dev; 1531 int err; 1532 1533 dev = ice_pf_to_dev(pf); 1534 1535 devlink_port = &pf->devlink_port; 1536 1537 vsi = ice_get_main_vsi(pf); 1538 if (!vsi) 1539 return -EIO; 1540 1541 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; 1542 attrs.phys.port_number = pf->hw.bus.func; 1543 1544 /* As FW supports only port split options for whole device, 1545 * set port split options only for first PF. 1546 */ 1547 if (pf->hw.pf_id == 0) 1548 ice_devlink_set_port_split_options(pf, &attrs); 1549 1550 ice_devlink_set_switch_id(pf, &attrs.switch_id); 1551 1552 devlink_port_attrs_set(devlink_port, &attrs); 1553 devlink = priv_to_devlink(pf); 1554 1555 err = devlink_port_register(devlink, devlink_port, vsi->idx); 1556 if (err) { 1557 dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", 1558 pf->hw.pf_id, err); 1559 return err; 1560 } 1561 1562 return 0; 1563 } 1564 1565 /** 1566 * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF 1567 * @pf: the PF to cleanup 1568 * 1569 * Unregisters the devlink_port structure associated with this PF. 1570 */ 1571 void ice_devlink_destroy_pf_port(struct ice_pf *pf) 1572 { 1573 devlink_port_unregister(&pf->devlink_port); 1574 } 1575 1576 /** 1577 * ice_devlink_create_vf_port - Create a devlink port for this VF 1578 * @vf: the VF to create a port for 1579 * 1580 * Create and register a devlink_port for this VF. 1581 * 1582 * Return: zero on success or an error code on failure. 1583 */ 1584 int ice_devlink_create_vf_port(struct ice_vf *vf) 1585 { 1586 struct devlink_port_attrs attrs = {}; 1587 struct devlink_port *devlink_port; 1588 struct devlink *devlink; 1589 struct ice_vsi *vsi; 1590 struct device *dev; 1591 struct ice_pf *pf; 1592 int err; 1593 1594 pf = vf->pf; 1595 dev = ice_pf_to_dev(pf); 1596 devlink_port = &vf->devlink_port; 1597 1598 vsi = ice_get_vf_vsi(vf); 1599 if (!vsi) 1600 return -EINVAL; 1601 1602 attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; 1603 attrs.pci_vf.pf = pf->hw.bus.func; 1604 attrs.pci_vf.vf = vf->vf_id; 1605 1606 ice_devlink_set_switch_id(pf, &attrs.switch_id); 1607 1608 devlink_port_attrs_set(devlink_port, &attrs); 1609 devlink = priv_to_devlink(pf); 1610 1611 err = devlink_port_register(devlink, devlink_port, vsi->idx); 1612 if (err) { 1613 dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", 1614 vf->vf_id, err); 1615 return err; 1616 } 1617 1618 return 0; 1619 } 1620 1621 /** 1622 * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF 1623 * @vf: the VF to cleanup 1624 * 1625 * Unregisters the devlink_port structure associated with this VF. 1626 */ 1627 void ice_devlink_destroy_vf_port(struct ice_vf *vf) 1628 { 1629 devl_rate_leaf_destroy(&vf->devlink_port); 1630 devlink_port_unregister(&vf->devlink_port); 1631 } 1632 1633 #define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) 1634 1635 static const struct devlink_region_ops ice_nvm_region_ops; 1636 static const struct devlink_region_ops ice_sram_region_ops; 1637 1638 /** 1639 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents 1640 * @devlink: the devlink instance 1641 * @ops: the devlink region to snapshot 1642 * @extack: extended ACK response structure 1643 * @data: on exit points to snapshot data buffer 1644 * 1645 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either 1646 * the nvm-flash or shadow-ram region. 1647 * 1648 * It captures a snapshot of the NVM or Shadow RAM flash contents. This 1649 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink 1650 * interface. 1651 * 1652 * @returns zero on success, and updates the data pointer. Returns a non-zero 1653 * error code on failure. 1654 */ 1655 static int ice_devlink_nvm_snapshot(struct devlink *devlink, 1656 const struct devlink_region_ops *ops, 1657 struct netlink_ext_ack *extack, u8 **data) 1658 { 1659 struct ice_pf *pf = devlink_priv(devlink); 1660 struct device *dev = ice_pf_to_dev(pf); 1661 struct ice_hw *hw = &pf->hw; 1662 bool read_shadow_ram; 1663 u8 *nvm_data, *tmp, i; 1664 u32 nvm_size, left; 1665 s8 num_blks; 1666 int status; 1667 1668 if (ops == &ice_nvm_region_ops) { 1669 read_shadow_ram = false; 1670 nvm_size = hw->flash.flash_size; 1671 } else if (ops == &ice_sram_region_ops) { 1672 read_shadow_ram = true; 1673 nvm_size = hw->flash.sr_words * 2u; 1674 } else { 1675 NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function"); 1676 return -EOPNOTSUPP; 1677 } 1678 1679 nvm_data = vzalloc(nvm_size); 1680 if (!nvm_data) 1681 return -ENOMEM; 1682 1683 num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE); 1684 tmp = nvm_data; 1685 left = nvm_size; 1686 1687 /* Some systems take longer to read the NVM than others which causes the 1688 * FW to reclaim the NVM lock before the entire NVM has been read. Fix 1689 * this by breaking the reads of the NVM into smaller chunks that will 1690 * probably not take as long. This has some overhead since we are 1691 * increasing the number of AQ commands, but it should always work 1692 */ 1693 for (i = 0; i < num_blks; i++) { 1694 u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left); 1695 1696 status = ice_acquire_nvm(hw, ICE_RES_READ); 1697 if (status) { 1698 dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", 1699 status, hw->adminq.sq_last_status); 1700 NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); 1701 vfree(nvm_data); 1702 return -EIO; 1703 } 1704 1705 status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE, 1706 &read_sz, tmp, read_shadow_ram); 1707 if (status) { 1708 dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", 1709 read_sz, status, hw->adminq.sq_last_status); 1710 NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); 1711 ice_release_nvm(hw); 1712 vfree(nvm_data); 1713 return -EIO; 1714 } 1715 ice_release_nvm(hw); 1716 1717 tmp += read_sz; 1718 left -= read_sz; 1719 } 1720 1721 *data = nvm_data; 1722 1723 return 0; 1724 } 1725 1726 /** 1727 * ice_devlink_nvm_read - Read a portion of NVM flash contents 1728 * @devlink: the devlink instance 1729 * @ops: the devlink region to snapshot 1730 * @extack: extended ACK response structure 1731 * @offset: the offset to start at 1732 * @size: the amount to read 1733 * @data: the data buffer to read into 1734 * 1735 * This function is called in response to DEVLINK_CMD_REGION_READ to directly 1736 * read a section of the NVM contents. 1737 * 1738 * It reads from either the nvm-flash or shadow-ram region contents. 1739 * 1740 * @returns zero on success, and updates the data pointer. Returns a non-zero 1741 * error code on failure. 1742 */ 1743 static int ice_devlink_nvm_read(struct devlink *devlink, 1744 const struct devlink_region_ops *ops, 1745 struct netlink_ext_ack *extack, 1746 u64 offset, u32 size, u8 *data) 1747 { 1748 struct ice_pf *pf = devlink_priv(devlink); 1749 struct device *dev = ice_pf_to_dev(pf); 1750 struct ice_hw *hw = &pf->hw; 1751 bool read_shadow_ram; 1752 u64 nvm_size; 1753 int status; 1754 1755 if (ops == &ice_nvm_region_ops) { 1756 read_shadow_ram = false; 1757 nvm_size = hw->flash.flash_size; 1758 } else if (ops == &ice_sram_region_ops) { 1759 read_shadow_ram = true; 1760 nvm_size = hw->flash.sr_words * 2u; 1761 } else { 1762 NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function"); 1763 return -EOPNOTSUPP; 1764 } 1765 1766 if (offset + size >= nvm_size) { 1767 NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size"); 1768 return -ERANGE; 1769 } 1770 1771 status = ice_acquire_nvm(hw, ICE_RES_READ); 1772 if (status) { 1773 dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", 1774 status, hw->adminq.sq_last_status); 1775 NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); 1776 return -EIO; 1777 } 1778 1779 status = ice_read_flat_nvm(hw, (u32)offset, &size, data, 1780 read_shadow_ram); 1781 if (status) { 1782 dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", 1783 size, status, hw->adminq.sq_last_status); 1784 NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); 1785 ice_release_nvm(hw); 1786 return -EIO; 1787 } 1788 ice_release_nvm(hw); 1789 1790 return 0; 1791 } 1792 1793 /** 1794 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities 1795 * @devlink: the devlink instance 1796 * @ops: the devlink region being snapshotted 1797 * @extack: extended ACK response structure 1798 * @data: on exit points to snapshot data buffer 1799 * 1800 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for 1801 * the device-caps devlink region. It captures a snapshot of the device 1802 * capabilities reported by firmware. 1803 * 1804 * @returns zero on success, and updates the data pointer. Returns a non-zero 1805 * error code on failure. 1806 */ 1807 static int 1808 ice_devlink_devcaps_snapshot(struct devlink *devlink, 1809 const struct devlink_region_ops *ops, 1810 struct netlink_ext_ack *extack, u8 **data) 1811 { 1812 struct ice_pf *pf = devlink_priv(devlink); 1813 struct device *dev = ice_pf_to_dev(pf); 1814 struct ice_hw *hw = &pf->hw; 1815 void *devcaps; 1816 int status; 1817 1818 devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); 1819 if (!devcaps) 1820 return -ENOMEM; 1821 1822 status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL, 1823 ice_aqc_opc_list_dev_caps, NULL); 1824 if (status) { 1825 dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n", 1826 status, hw->adminq.sq_last_status); 1827 NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities"); 1828 vfree(devcaps); 1829 return status; 1830 } 1831 1832 *data = (u8 *)devcaps; 1833 1834 return 0; 1835 } 1836 1837 static const struct devlink_region_ops ice_nvm_region_ops = { 1838 .name = "nvm-flash", 1839 .destructor = vfree, 1840 .snapshot = ice_devlink_nvm_snapshot, 1841 .read = ice_devlink_nvm_read, 1842 }; 1843 1844 static const struct devlink_region_ops ice_sram_region_ops = { 1845 .name = "shadow-ram", 1846 .destructor = vfree, 1847 .snapshot = ice_devlink_nvm_snapshot, 1848 .read = ice_devlink_nvm_read, 1849 }; 1850 1851 static const struct devlink_region_ops ice_devcaps_region_ops = { 1852 .name = "device-caps", 1853 .destructor = vfree, 1854 .snapshot = ice_devlink_devcaps_snapshot, 1855 }; 1856 1857 /** 1858 * ice_devlink_init_regions - Initialize devlink regions 1859 * @pf: the PF device structure 1860 * 1861 * Create devlink regions used to enable access to dump the contents of the 1862 * flash memory on the device. 1863 */ 1864 void ice_devlink_init_regions(struct ice_pf *pf) 1865 { 1866 struct devlink *devlink = priv_to_devlink(pf); 1867 struct device *dev = ice_pf_to_dev(pf); 1868 u64 nvm_size, sram_size; 1869 1870 nvm_size = pf->hw.flash.flash_size; 1871 pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, 1872 nvm_size); 1873 if (IS_ERR(pf->nvm_region)) { 1874 dev_err(dev, "failed to create NVM devlink region, err %ld\n", 1875 PTR_ERR(pf->nvm_region)); 1876 pf->nvm_region = NULL; 1877 } 1878 1879 sram_size = pf->hw.flash.sr_words * 2u; 1880 pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops, 1881 1, sram_size); 1882 if (IS_ERR(pf->sram_region)) { 1883 dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", 1884 PTR_ERR(pf->sram_region)); 1885 pf->sram_region = NULL; 1886 } 1887 1888 pf->devcaps_region = devlink_region_create(devlink, 1889 &ice_devcaps_region_ops, 10, 1890 ICE_AQ_MAX_BUF_LEN); 1891 if (IS_ERR(pf->devcaps_region)) { 1892 dev_err(dev, "failed to create device-caps devlink region, err %ld\n", 1893 PTR_ERR(pf->devcaps_region)); 1894 pf->devcaps_region = NULL; 1895 } 1896 } 1897 1898 /** 1899 * ice_devlink_destroy_regions - Destroy devlink regions 1900 * @pf: the PF device structure 1901 * 1902 * Remove previously created regions for this PF. 1903 */ 1904 void ice_devlink_destroy_regions(struct ice_pf *pf) 1905 { 1906 if (pf->nvm_region) 1907 devlink_region_destroy(pf->nvm_region); 1908 1909 if (pf->sram_region) 1910 devlink_region_destroy(pf->sram_region); 1911 1912 if (pf->devcaps_region) 1913 devlink_region_destroy(pf->devcaps_region); 1914 } 1915