// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */

#include <devlink.h>

#include "mlx5_core.h"
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"

/* devlink "flash" command: burn a new firmware image to the device. */
static int mlx5_devlink_flash_update(struct devlink *devlink,
				     struct devlink_flash_update_params *params,
				     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	return mlx5_firmware_flash(dev, params->fw, extack);
}

/* FW version is packed into a u32 as major(8) : minor(8) : subminor(16). */
static u8 mlx5_fw_ver_major(u32 version)
{
	return (version >> 24) & 0xff;
}

static u8 mlx5_fw_ver_minor(u32 version)
{
	return (version >> 16) & 0xff;
}

static u16 mlx5_fw_ver_subminor(u32 version)
{
	return version & 0xffff;
}

#define DEVLINK_FW_STRING_LEN 32

/* devlink "info" command: report the PSID (board id) as a fixed version,
 * and the running plus stored (pending) firmware versions.
 */
static int
mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	char version_str[DEVLINK_FW_STRING_LEN];
	u32 running_fw, stored_fw;
	int err;

	err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
	if (err)
		return err;

	err = mlx5_fw_version_query(dev, &running_fw, &stored_fw);
	if (err)
		return err;

	snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
		 mlx5_fw_ver_major(running_fw), mlx5_fw_ver_minor(running_fw),
		 mlx5_fw_ver_subminor(running_fw));
	err = devlink_info_version_running_put(req, "fw.version", version_str);
	if (err)
		return err;
	err = devlink_info_version_running_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW,
					       version_str);
	if (err)
		return err;

	/* no pending version, return running (stored) version */
	if (stored_fw == 0)
		stored_fw = running_fw;

	snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
		 mlx5_fw_ver_major(stored_fw), mlx5_fw_ver_minor(stored_fw),
		 mlx5_fw_ver_subminor(stored_fw));
	err = devlink_info_version_stored_put(req, "fw.version", version_str);
	if (err)
		return err;
	return devlink_info_version_stored_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW,
					       version_str);
}

/* Activate the stored firmware image through a synchronized FW reset
 * (reset level 3), then wait for the device to come back up on PCI.
 */
static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 reset_level, reset_type, net_port_alive;
	int err;

	err = mlx5_fw_reset_query(dev, &reset_level, &reset_type);
	if (err)
		return err;
	if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL3)) {
		NL_SET_ERR_MSG_MOD(extack, "FW activate requires reboot");
		return -EINVAL;
	}

	net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
	err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
	if (err)
		return err;

	err = mlx5_fw_reset_wait_reset_done(dev);
	if (err)
		return err;

	mlx5_unload_one_devl_locked(dev);
	err = mlx5_health_wait_pci_up(dev);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");

	return err;
}

/* FW live patching (reset level 0): apply the stored FW with no reset. */
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
					      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 reset_level;
	int err;

	err = mlx5_fw_reset_query(dev, &reset_level, NULL);
	if (err)
		return err;
	if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL0)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "FW upgrade to the stored FW can't be done by FW live patching");
		return -EINVAL;
	}

	return mlx5_fw_reset_set_live_patch(dev);
}

/* devlink reload_down: bring the device down per the requested action. */
static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
				    enum devlink_reload_action action,
				    enum devlink_reload_limit limit,
				    struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct pci_dev *pdev = dev->pdev;
	bool sf_dev_allocated;
	int ret = 0;

	sf_dev_allocated = mlx5_sf_dev_allocated(dev);
	if (sf_dev_allocated) {
		/* Reload results in deleting SF device which further results in
		 * unregistering devlink instance while holding devlink_mutext.
		 * Hence, do not support reload.
		 */
		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
		return -EOPNOTSUPP;
	}

	if (mlx5_lag_is_active(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
		return -EOPNOTSUPP;
	}

	/* Not fatal: warn the user via extack but continue with the reload. */
	if (pci_num_vf(pdev)) {
		NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
	}

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		mlx5_unload_one_devl_locked(dev);
		break;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
		else
			ret = mlx5_devlink_reload_fw_activate(devlink, extack);
		break;
	default:
		/* Unsupported action should not get to this function */
		WARN_ON(1);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

/* devlink reload_up: bring the device back up and report which reload
 * actions were actually performed.
 */
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
				  enum devlink_reload_limit limit, u32 *actions_performed,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int ret = 0;

	*actions_performed = BIT(action);
	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		ret = mlx5_load_one_devl_locked(dev, false);
		break;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			break;
		/* On fw_activate action, also driver is reloaded and reinit performed */
		*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		ret = mlx5_load_one_devl_locked(dev, false);
		break;
	default:
		/* Unsupported action should not get to this function */
203 WARN_ON(1); 204 ret = -EOPNOTSUPP; 205 } 206 207 return ret; 208 } 209 210 static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id) 211 { 212 struct mlx5_devlink_trap *dl_trap; 213 214 list_for_each_entry(dl_trap, &dev->priv.traps, list) 215 if (dl_trap->trap.id == trap_id) 216 return dl_trap; 217 218 return NULL; 219 } 220 221 static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap, 222 void *trap_ctx) 223 { 224 struct mlx5_core_dev *dev = devlink_priv(devlink); 225 struct mlx5_devlink_trap *dl_trap; 226 227 dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL); 228 if (!dl_trap) 229 return -ENOMEM; 230 231 dl_trap->trap.id = trap->id; 232 dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP; 233 dl_trap->item = trap_ctx; 234 235 if (mlx5_find_trap_by_id(dev, trap->id)) { 236 kfree(dl_trap); 237 mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id); 238 return -EEXIST; 239 } 240 241 list_add_tail(&dl_trap->list, &dev->priv.traps); 242 return 0; 243 } 244 245 static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap, 246 void *trap_ctx) 247 { 248 struct mlx5_core_dev *dev = devlink_priv(devlink); 249 struct mlx5_devlink_trap *dl_trap; 250 251 dl_trap = mlx5_find_trap_by_id(dev, trap->id); 252 if (!dl_trap) { 253 mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id); 254 return; 255 } 256 list_del(&dl_trap->list); 257 kfree(dl_trap); 258 } 259 260 static int mlx5_devlink_trap_action_set(struct devlink *devlink, 261 const struct devlink_trap *trap, 262 enum devlink_trap_action action, 263 struct netlink_ext_ack *extack) 264 { 265 struct mlx5_core_dev *dev = devlink_priv(devlink); 266 enum devlink_trap_action action_orig; 267 struct mlx5_devlink_trap *dl_trap; 268 int err = 0; 269 270 if (is_mdev_switchdev_mode(dev)) { 271 NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode"); 272 return -EOPNOTSUPP; 273 } 274 275 dl_trap 
	    = mlx5_find_trap_by_id(dev, trap->id);
	if (!dl_trap) {
		mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
		err = -EINVAL;
		goto out;
	}

	/* Only DROP and TRAP are supported actions. */
	if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (action == dl_trap->trap.action)
		goto out;

	action_orig = dl_trap->trap.action;
	dl_trap->trap.action = action;
	/* Notify listeners of the new action; roll back on failure. */
	err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
						&dl_trap->trap);
	if (err)
		dl_trap->trap.action = action_orig;
out:
	return err;
}

static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
	.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
	.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
	.port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
	.port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
	.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
	.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
	.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
	.rate_node_new = mlx5_esw_devlink_rate_node_new,
	.rate_node_del = mlx5_esw_devlink_rate_node_del,
	.rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
	.port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
	.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
	.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
	.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_new = mlx5_devlink_sf_port_new,
	.port_del = mlx5_devlink_sf_port_del,
	.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
	.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
	.flash_update = mlx5_devlink_flash_update,
	.info_get = mlx5_devlink_info_get,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
	.reload_down = mlx5_devlink_reload_down,
	.reload_up = mlx5_devlink_reload_up,
	.trap_init = mlx5_devlink_trap_init,
	.trap_fini = mlx5_devlink_trap_fini,
	.trap_action_set = mlx5_devlink_trap_action_set,
};

/* Report a trapped packet to devlink, but only when the trap is armed
 * (its action is TRAP); otherwise it is silently counted as debug.
 */
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
			      struct devlink_port *dl_port)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
	if (!dl_trap) {
		mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
		return;
	}

	if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
		mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
			      dl_trap->trap.action);
		return;
	}
	devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
}

/* Count traps whose action is currently TRAP. */
int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
{
	struct mlx5_devlink_trap *dl_trap;
	int count = 0;

	list_for_each_entry(dl_trap, &dev->priv.traps, list)
		if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
			count++;

	return count;
}

/* Fetch the current action of trap @trap_id; -EINVAL if it is unknown. */
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
				  enum devlink_trap_action *action)
{
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
	if (!dl_trap) {
		mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
			      trap_id);
		return -EINVAL;
	}

	*action = dl_trap->trap.action;
	return 0;
}

/* Allocate a devlink instance with mlx5_core_dev as its private area. */
struct devlink *mlx5_devlink_alloc(struct device *dev)
{
	return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev),
			     dev);
}

void mlx5_devlink_free(struct devlink *devlink)
{
	devlink_free(devlink);
}

/* Validate the "flow_steering_mode" devlink param: "dmfs" is always
 * accepted; "smfs" requires SW-managed steering support and eswitch not
 * in offloads mode; any other string is rejected.
 */
static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
					 union devlink_param_value val,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	char *value = val.vstr;
	int err = 0;

	if (!strcmp(value, "dmfs")) {
		return 0;
	} else if (!strcmp(value, "smfs")) {
		u8 eswitch_mode;
		bool smfs_cap;

		eswitch_mode = mlx5_eswitch_mode(dev);
		smfs_cap = mlx5_fs_dr_is_supported(dev);

		if (!smfs_cap) {
			err = -EOPNOTSUPP;
			NL_SET_ERR_MSG_MOD(extack,
					   "Software managed steering is not supported by current device");
		}

		else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Software managed steering is not supported when eswitch offloads enabled.");
			err = -EOPNOTSUPP;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack,
				   "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
		err = -EINVAL;
	}

	return err;
}

/* Apply the (already validated) flow steering mode to the steering core. */
static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id,
				    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	enum mlx5_flow_steering_mode mode;

	if (!strcmp(ctx->val.vstr, "smfs"))
		mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		mode = MLX5_FLOW_STEERING_MODE_DMFS;
	dev->priv.steering->mode = mode;

	return 0;
}

/* Report the current flow steering mode as a string param value. */
static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
				    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
		strcpy(ctx->val.vstr, "smfs");
	else
		strcpy(ctx->val.vstr, "dmfs");
	return 0;
}

/* Validate enable_roce: device must support RoCE (or runtime RoCE on/off),
 * and multi-port slave / LAG devices cannot reconfigure it.
 */
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !MLX5_CAP_GEN(dev, roce) &&
	    !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
		return -EOPNOTSUPP;
	}
	if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
		return -EOPNOTSUPP;
	}

	return 0;
}

#ifdef CONFIG_MLX5_ESWITCH
/* fdb_large_groups must be within [1, 1024]. */
static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
						 union devlink_param_value val,
						 struct netlink_ext_ack *extack)
{
	int group_num = val.vu32;

	if (group_num < 1 || group_num > 1024) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported group number, supported range is 1-1024");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Toggle eswitch vport match metadata; requires eswitch manager caps. */
static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
					      struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
}

static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
					      struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
	return 0;
}

static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
						   union devlink_param_value val,
						   struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 esw_mode;

	if (!MLX5_ESWITCH_MANAGER(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
		return -EOPNOTSUPP;
	}
	/* Metadata can only change while eswitch is not in switchdev mode. */
	esw_mode = mlx5_eswitch_mode(dev);
	if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-Switch must either disabled or non switchdev mode");
		return -EBUSY;
	}
	return 0;
}

#endif

/* Forward the enable_remote_dev_reset knob to the fw_reset layer. */
static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
	return 0;
}

static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
	return 0;
}

/* EQ depth (io/event eq size params) must be within [64, 4096]. */
static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
					  union devlink_param_value val,
					  struct netlink_ext_ack *extack)
{
	return (val.vu32 >= 64 && val.vu32 <= 4096) ?
	       0 : -EINVAL;
}

static const struct devlink_param mlx5_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
			     "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
			     mlx5_devlink_fs_mode_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_enable_roce_validate),
#ifdef CONFIG_MLX5_ESWITCH
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
			     "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL,
			     mlx5_devlink_large_group_num_validate),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
			     "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlx5_devlink_esw_port_metadata_get,
			     mlx5_devlink_esw_port_metadata_set,
			     mlx5_devlink_esw_port_metadata_validate),
#endif
	DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      mlx5_devlink_enable_remote_dev_reset_get,
			      mlx5_devlink_enable_remote_dev_reset_set, NULL),
	DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_eq_depth_validate),
	DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_eq_depth_validate),
};

/* Seed the driverinit params with defaults derived from HW capabilities. */
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;

	value.vbool = MLX5_CAP_GEN(dev, roce);
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					   value);

#ifdef CONFIG_MLX5_ESWITCH
	value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	devlink_param_driverinit_value_set(devlink,
					   MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
					   value);
#endif

	value.vu32 =
		MLX5_COMP_EQ_SIZE;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
					   value);

	value.vu32 = MLX5_NUM_ASYNC_EQE;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
					   value);
}

static const struct devlink_param enable_eth_param =
	DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, NULL);

/* Register the enable_eth param (default true) only when ETH is supported. */
static int mlx5_devlink_eth_param_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	int err;

	if (!mlx5_eth_supported(dev))
		return 0;

	err = devlink_param_register(devlink, &enable_eth_param);
	if (err)
		return err;

	value.vbool = true;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
					   value);
	return 0;
}

static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!mlx5_eth_supported(dev))
		return;

	devlink_param_unregister(devlink, &enable_eth_param);
}

/* enable_rdma can only be turned on when the device supports RDMA. */
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !mlx5_rdma_supported(dev))
		return -EOPNOTSUPP;
	return 0;
}

static const struct devlink_param enable_rdma_param =
	DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_enable_rdma_validate);

/* Register enable_rdma (default true) only when INFINIBAND is built in. */
static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
{
	union devlink_param_value value;
	int err;

	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return 0;

	err = devlink_param_register(devlink, &enable_rdma_param);
	if (err)
		return err;

	value.vbool = true;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
					   value);
	return 0;
}

static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return;

	devlink_param_unregister(devlink, &enable_rdma_param);
}

static const struct devlink_param enable_vnet_param =
	DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, NULL);

/* Register enable_vnet (default true) only when VNET is supported. */
static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	int err;

	if (!mlx5_vnet_supported(dev))
		return 0;

	err = devlink_param_register(devlink, &enable_vnet_param);
	if (err)
		return err;

	value.vbool = true;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
					   value);
	return 0;
}

static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!mlx5_vnet_supported(dev))
		return;

	devlink_param_unregister(devlink, &enable_vnet_param);
}

/* Register the auxiliary-device enable params; unwind on failure. */
static int mlx5_devlink_auxdev_params_register(struct devlink *devlink)
{
	int err;

	err = mlx5_devlink_eth_param_register(devlink);
	if (err)
		return err;

	err = mlx5_devlink_rdma_param_register(devlink);
	if (err)
		goto rdma_err;

	err = mlx5_devlink_vnet_param_register(devlink);
	if (err)
		goto vnet_err;
	return 0;

vnet_err:
	mlx5_devlink_rdma_param_unregister(devlink);
rdma_err:
	mlx5_devlink_eth_param_unregister(devlink);
	return err;
}

/* Unregister in reverse order of registration. */
static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
{
	mlx5_devlink_vnet_param_unregister(devlink);
	mlx5_devlink_rdma_param_unregister(devlink);
	mlx5_devlink_eth_param_unregister(devlink);
}

/* max_macs must be nonzero, a power of two, and within the device's
 * supported unicast list size range.
 */
static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (val.vu32 == 0) {
		NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0");
		return -EINVAL;
	}

	if (!is_power_of_2(val.vu32)) {
		NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs");
		return -EINVAL;
	}

	if (ilog2(val.vu32) >
	    MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) {
		NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param max_uc_list_param =
	DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_max_uc_list_validate);

/* Register max_macs only when FW allows writing the current UC list size;
 * default to the device's current maximum.
 */
static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	int err;

	if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
		return 0;

	err = devlink_param_register(devlink, &max_uc_list_param);
	if (err)
		return err;

	value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
					   value);
	return 0;
}

static void
mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
		return;

	devlink_param_unregister(devlink, &max_uc_list_param);
}

/* Generic DROP trap in group @_group_id, reporting the ingress port. */
#define MLX5_TRAP_DROP(_id, _group_id) \
	DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
			     DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
			     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)

static const struct devlink_trap mlx5_traps_arr[] = {
	MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
	MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
};

static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
	DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
};

/* Register trap groups first, then the traps; unwind groups on failure. */
int mlx5_devlink_traps_register(struct devlink *devlink)
{
	struct mlx5_core_dev *core_dev = devlink_priv(devlink);
	int err;

	err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
					ARRAY_SIZE(mlx5_trap_groups_arr));
	if (err)
		return err;

	err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
				  &core_dev->priv);
	if (err)
		goto err_trap_group;
	return 0;

err_trap_group:
	devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
				    ARRAY_SIZE(mlx5_trap_groups_arr));
	return err;
}

void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
	devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
	devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
				    ARRAY_SIZE(mlx5_trap_groups_arr));
}

/* Top-level devlink registration: base params and their init values, the
 * auxiliary-device params, max_macs, and (except for multi-port slaves)
 * the RELOAD feature.
 */
int mlx5_devlink_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = devlink_params_register(devlink, mlx5_devlink_params,
				      ARRAY_SIZE(mlx5_devlink_params));
	if (err)
		return err;

	mlx5_devlink_set_params_init_values(devlink);

	err = mlx5_devlink_auxdev_params_register(devlink);
	if (err)
		goto auxdev_reg_err;

	err = mlx5_devlink_max_uc_list_param_register(devlink);
	if (err)
		goto max_uc_list_err;

	if (!mlx5_core_is_mp_slave(dev))
		devlink_set_features(devlink, DEVLINK_F_RELOAD);

	return 0;

max_uc_list_err:
	mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
	devlink_params_unregister(devlink, mlx5_devlink_params,
				  ARRAY_SIZE(mlx5_devlink_params));
	return err;
}

/* Mirror of mlx5_devlink_register(): unregister in reverse order. */
void mlx5_devlink_unregister(struct devlink *devlink)
{
	mlx5_devlink_max_uc_list_param_unregister(devlink);
	mlx5_devlink_auxdev_params_unregister(devlink);
	devlink_params_unregister(devlink, mlx5_devlink_params,
				  ARRAY_SIZE(mlx5_devlink_params));
}