/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_core.h"

/* intf dev list mutex: serializes auxiliary device add/remove and rescan */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	return true;
}

static bool is_eth_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
		mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
		return false;
	}

	if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
		mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, csum_cap)) {
		mlx5_core_warn(dev, "Missing csum_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
		mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, vlan_cap)) {
		mlx5_core_warn(dev, "Missing vlan_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
		mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
		return false;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.max_ft_level) < 3) {
		mlx5_core_warn(dev, "max_ft_level < 3\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
		mlx5_core_warn(dev, "Self loopback prevention is not supported\n");
	if (!MLX5_CAP_GEN(dev, cq_moderation))
		mlx5_core_warn(dev, "CQ moderation is not supported\n");

	return true;
}

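/* vDPA net ("vnet") auxiliary devices are created only when
 * CONFIG_MLX5_VDPA_NET is enabled, and only for non-PF functions that
 * expose virtio net queue objects, support QP event mode and report an
 * Ethernet frame offload type.
 */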
static bool is_vnet_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
		return false;

	if (mlx5_core_is_pf(dev))
		return false;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
		return false;

	if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
	      MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
		return false;

	if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
		return false;

	return true;
}

static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (!is_eth_rep_supported(dev))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	if (mlx5_core_mp_enabled(dev))
		return false;

	return true;
}

static bool is_mp_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!mlx5_core_is_mp_slave(dev))
		return false;

	return true;
}

static bool is_ib_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (is_mp_supported(dev))
		return false;

	return true;
}

enum {
	MLX5_INTERFACE_PROTOCOL_ETH,
	MLX5_INTERFACE_PROTOCOL_ETH_REP,

	MLX5_INTERFACE_PROTOCOL_IB,
	MLX5_INTERFACE_PROTOCOL_IB_REP,
	MLX5_INTERFACE_PROTOCOL_MPIB,

	MLX5_INTERFACE_PROTOCOL_VNET,
};

static const struct mlx5_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
	[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
					   .is_supported = &is_vnet_supported },
	[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
					 .is_supported = &is_ib_supported },
	[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
					  .is_supported = &is_eth_supported },
	[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
					      .is_supported = &is_eth_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
					     .is_supported = &is_ib_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
					   .is_supported = &is_mp_supported },
};

int mlx5_adev_idx_alloc(void)
{
	return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
	ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
			     sizeof(struct mlx5_adev *), GFP_KERNEL);
	if (!priv->adev)
		return -ENOMEM;

	return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	kfree(priv->adev);
}

static void adev_release(struct device *dev)
{
	struct mlx5_adev *mlx5_adev =
		container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
	int idx = mlx5_adev->idx;

	kfree(mlx5_adev);
	priv->adev[idx] = NULL;
}

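/* add_adev() publishes one auxiliary device per supported protocol. The
 * device shows up on the auxiliary bus as "mlx5_core.<suffix>.<id>", where
 * <id> is the per-PCI-function index allocated by mlx5_adev_idx_alloc(),
 * and is claimed by the auxiliary driver that matches "mlx5_core.<suffix>"
 * (e.g. the mlx5 Ethernet, RDMA and vDPA drivers).
 *
 * For illustration only (example_id_table is a hypothetical name, not part
 * of this driver), a consumer of the "eth" device would typically match on
 * an id_table entry such as:
 *
 *	static const struct auxiliary_device_id example_id_table[] = {
 *		{ .name = MLX5_ADEV_NAME ".eth" },
 *		{},
 *	};
 */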
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
	const char *suffix = mlx5_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = dev->priv.adev_idx;
	adev->name = suffix;
	adev->dev.parent = dev->device;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

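/* mlx5_attach_device() and mlx5_detach_device() below pair up: detach
 * suspends bound auxiliary drivers (or deletes the auxiliary device when
 * the driver provides no suspend callback) and marks the core device as
 * detached; attach clears that mark, resumes the suspended drivers and
 * creates any auxiliary devices that have become supported meanwhile.
 */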
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	int ret = 0, i;

	mutex_lock(&mlx5_intf_mutex);
	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;

			if (mlx5_adev_devices[i].is_supported)
				is_supported = mlx5_adev_devices[i].is_supported(dev);

			if (!is_supported)
				continue;

			priv->adev[i] = add_adev(dev, i);
			if (IS_ERR(priv->adev[i])) {
				ret = PTR_ERR(priv->adev[i]);
				priv->adev[i] = NULL;
			}
		} else {
			adev = &priv->adev[i]->adev;

			/* Pay attention that this is not the PCI driver that
			 * mlx5_core_dev is bound to, but an auxiliary driver.
			 *
			 * Here we may race with module unload during devlink
			 * reload, but no extra lock is needed because we are
			 * holding the global mlx5_intf_mutex.
			 */
			if (!adev->dev.driver)
				continue;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
				ret = adrv->resume(adev);
		}
		if (ret) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);

			break;
		}
	}
	mutex_unlock(&mlx5_intf_mutex);
	return ret;
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	pm_message_t pm = {};
	int i;

	mutex_lock(&mlx5_intf_mutex);
	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		if (!priv->adev[i])
			continue;

		adev = &priv->adev[i]->adev;
		/* The auxiliary driver was unbound manually through sysfs */
		if (!adev->dev.driver)
			goto skip_suspend;

		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend) {
			adrv->suspend(adev, pm);
			continue;
		}

skip_suspend:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_register_device(struct mlx5_core_dev *dev)
{
	int ret;

	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	ret = mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
	if (ret)
		mlx5_unregister_device(dev);

	return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
}

static int add_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			/* Continue rescanning the remaining drivers and leave
			 * it to the caller to decide whether to release
			 * everything or to continue.
			 */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

static void delete_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	bool delete_all;
	int i;

	delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* This function is used after mlx5_core_dev is reconfigured: auxiliary
 * devices that are no longer supported are deleted and newly supported
 * ones are created.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	lockdep_assert_held(&mlx5_intf_mutex);
	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
		return 0;

	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
		return 0;

	return add_drivers(dev);
}

static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

static int next_phys_dev(struct device *dev, const void *data)
{
	struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_core_dev *mdev = madev->mdev;
	const struct mlx5_core_dev *curr = data;

	if (!mlx5_core_is_pf(mdev))
		return 0;

	if (mdev == curr)
		return 0;

	if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
		return 0;

	return 1;
}

/* This function is called in two flows:
 * 1. During initialization of mlx5_core_dev, where no locking is needed.
 * 2. During the LAG configure stage, where the caller holds &mlx5_intf_mutex.
 */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;

	if (!mlx5_core_is_pf(dev))
		return NULL;

	adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
	if (!adev)
		return NULL;

	madev = container_of(adev, struct mlx5_adev, adev);
	put_device(&adev->dev);
	return madev->mdev;
}

void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}
void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}