/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <net/devlink.h>

#include <uapi/rdma/mlx4-abi.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level; /* 0 by default */
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
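
/* Usage sketch (illustrative values, not defaults): loading the module with
 *   modprobe mlx4_core num_vfs=2,2,4 probe_vf=1,1,2
 * requests 2 single-port VFs on each port plus 4 dual-port VFs, with the PF
 * probing 1/1/2 of them, following the port1,port2,port1+2 triplet format
 * described in the parameter strings above.
 */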

static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
		 "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION "\n";

static const struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static const struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
				   "(0-7) (default: 0)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");
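
/* Usage sketch (illustrative): on a dual-port HCA,
 *   modprobe mlx4_core port_type_array=1,2
 * requests IB on port 1 and Ethernet on port 2, while the default of
 * HW_DEFAULT (0) lets the firmware hint or port sensing pick the type.
 */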

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
				       struct devlink_param_gset_ctx *ctx)
{
	ctx->val.vbool = !!mlx4_internal_err_reset;
	return 0;
}

static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
				       struct devlink_param_gset_ctx *ctx)
{
	mlx4_internal_err_reset = ctx->val.vbool;
	return 0;
}

static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;

	ctx->val.vbool = dev->persist->crdump.snapshot_enable;
	return 0;
}

static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;

	dev->persist->crdump.snapshot_enable = ctx->val.vbool;
	return 0;
}

static int
mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
			       union devlink_param_value val,
			       struct netlink_ext_ack *extack)
{
	u32 value = val.vu32;

	if (value < 1 || value > 128)
		return -ERANGE;

	if (!is_power_of_2(value)) {
		NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
		return -EINVAL;
	}

	return 0;
}

enum mlx4_devlink_param_id {
	MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
	MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
};

static const struct devlink_param mlx4_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      mlx4_devlink_ierr_reset_get,
			      mlx4_devlink_ierr_reset_set, NULL),
	DEVLINK_PARAM_GENERIC(MAX_MACS,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx4_devlink_max_macs_validate),
	DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      mlx4_devlink_crdump_snapshot_get,
			      mlx4_devlink_crdump_snapshot_set, NULL),
	DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
			     "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL, NULL),
	DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
			     "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL, NULL),
};
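
/* Usage sketch (illustrative; the PCI address is an example): the table
 * above is driven through the devlink userspace tool, e.g.
 *   devlink dev param set pci/0000:03:00.0 name enable_4k_uar \
 *           value true cmode driverinit
 *   devlink dev reload pci/0000:03:00.0
 * The generic params (internal_err_reset, max_macs, region_snapshot_enable)
 * are set the same way; RUNTIME-capable ones take effect without a reload.
 */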

static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{
	union devlink_param_value value;

	value.vbool = !!mlx4_internal_err_reset;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
					value);

	value.vu32 = 1UL << log_num_mac;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
					value);

	value.vbool = enable_64b_cqe_eqe;
	devl_param_driverinit_value_set(devlink,
					MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
					value);

	value.vbool = enable_4k_uar;
	devl_param_driverinit_value_set(devlink,
					MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
					value);

	value.vbool = false;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
					value);
}

static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
{
	/* The reserved_uars is calculated by system page size unit.
	 * Therefore, adjustment is added when the uar page size is less
	 * than the system page size
	 */
	dev->caps.reserved_uars =
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
		      (1 << (PAGE_SHIFT - dev->uar_page_shift)));
}
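
/* Worked example (illustrative numbers): on a 64KB-page system
 * (PAGE_SHIFT == 16) with 4KB UAR pages (uar_page_shift == 12), 16 UAR
 * pages fit in one system page, so a firmware reservation of 32 UAR-page
 * units shrinks to 32 / (1 << 4) == 2 system-page units before being
 * clamped from below by mlx4_get_num_reserved_uar().
 */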

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EOPNOTSUPP;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
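
/* Decision summary (derived from the checks above): stride mode is kept
 * only when firmware reports both 64B CQE/EQE and both stride flags and
 * the CPU cache line is 128 or 256 bytes; the CQE/EQE then spans a full
 * cache line while the meaningful payload stays 32 bytes. On 32/64 byte
 * cache lines, plain 64B CQEs/EQEs are already cache-aligned, so stride
 * is disabled.
 */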

static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port]		= port_cap->max_vl;
	dev->caps.ib_mtu_cap[port]	= port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port]  = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port]	= port_cap->max_gids;
	dev->caps.pkey_table_len[port]	= port_cap->max_pkeys;
	dev->caps.port_width_cap[port]	= port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port]	= port_cap->eth_mtu;
	dev->caps.max_tc_eth		= port_cap->max_tc_eth;
	dev->caps.def_mac[port]		= port_cap->def_mac;
	dev->caps.supported_type[port]	= port_cap->supported_port_types;
	dev->caps.suggested_type[port]	= port_cap->suggested_type;
	dev->caps.default_sense[port]	= port_cap->default_sense;
	dev->caps.trans_type[port]	= port_cap->trans_type;
	dev->caps.vendor_oui[port]	= port_cap->vendor_oui;
	dev->caps.wavelength[port]	= port_cap->wavelength;
	dev->caps.trans_code[port]	= port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->caps.num_sys_eqs	     = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs  = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				       dev->caps.num_sys_eqs :
				       MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE to enable resizing the CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts	     = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
	dev->caps.wol_port[1]	     = dev_cap->wol_port[1];
	dev->caps.wol_port[2]	     = dev_cap->wol_port[2];
	dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;

	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* Virtual PCI function needs to determine UAR page size from
		 * firmware. Only master PCI function can set the uar page size
		 */
		if (enable_4k_uar || !dev->persist->num_vfs)
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		else
			dev->uar_page_shift = PAGE_SHIFT;

		mlx4_set_num_reserved_uars(dev, dev_cap);
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies the phv bit was reported correctly in the wqe. To
		 * allow QinQ, the PHV_EN flag should be set and phv_check_en
		 * must be cleared, otherwise QinQ packets will be dropped by
		 * the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs	= MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans	= MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs	= log_num_mac;
		dev->caps.log_num_vlans	= MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
	}

	dev->caps.max_counters = dev_cap->max_counters;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
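
	/* Worked example (defaults): with log_num_mac == 7,
	 * MLX4_LOG_NUM_VLANS == 7 and a 2-port device, each of the ETH_ADDR
	 * and FC_ADDR regions reserves 128 * 128 * 2 == 32768 QPs.
	 */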

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}

/* The function checks if there are live VFs and returns their number */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
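
/* Layout sketch (restating the bounds checks above): valid qpn values fall
 * in [base_proxy_sqpn, base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX); proxy QPs
 * map to qkey = MLX4_RESERVED_QKEY_BASE + (qpn - base_proxy_sqpn) and
 * tunnel QPs to MLX4_RESERVED_QKEY_BASE + (qpn - base_tunnel_sqpn), so each
 * paravirtualized special QP gets a deterministic reserved qkey.
 */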

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
				       struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
			next_rule->rsvd == 0;

		if (last_rule)
			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}
EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}
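
/* Worked example: for log_mc_entry_sz == 10 the non-DMFS branch above gives
 * 4 * ((1 << 10) / 16 - 2) == 4 * 62 == 248 QPs per multicast group, which
 * is exactly the "10 gives 248" figure quoted in the log_num_mgm_entry_size
 * module parameter description.
 */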

static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
{
	kfree(dev->caps.spec_qps);
	dev->caps.spec_qps = NULL;
}

static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
{
	struct mlx4_func_cap *func_cap;
	struct mlx4_caps *caps = &dev->caps;
	int i, err = 0;

	func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
	caps->spec_qps = kcalloc(caps->num_ports, sizeof(*caps->spec_qps), GFP_KERNEL);

	if (!func_cap || !caps->spec_qps) {
		mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= caps->num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		caps->spec_qps[i - 1] = func_cap->spec_qps;
		caps->port_mask[i] = caps->port_type[i];
		caps->phys_port_id[i] = func_cap->phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &caps->gid_table_len[i],
						      &caps->pkey_table_len[i]);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
	}

err_mem:
	if (err)
		mlx4_slave_destroy_special_qp_cap(dev);
	kfree(func_cap);
	return err;
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap *dev_cap;
	struct mlx4_func_cap *func_cap;
	struct mlx4_init_hca_param *hca_param;

	hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL);
	func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
	dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
	if (!hca_param || !func_cap || !dev_cap) {
		mlx4_err(dev, "Failed to allocate memory for slave_cap\n");
		err = -ENOMEM;
		goto free_mem;
	}

	err = mlx4_QUERY_HCA(dev, hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		goto free_mem;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param->global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		err = -EINVAL;
		goto free_mem;
	}

	dev->caps.hca_core_clock = hca_param->hca_core_clock;

	dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp;
	err = mlx4_dev_cap(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		goto free_mem;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		err = -ENODEV;
		goto free_mem;
	}
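
	/* Arithmetic note: page_size_cap was stored in mlx4_dev_cap() as
	 * ~(min_page_sz - 1), so ~page_size_cap + 1 recovers min_page_sz by
	 * two's complement; e.g. a 4096-byte minimum yields a cap mask of
	 * 0xfffff000, and ~0xfffff000 + 1 == 4096 here.
	 */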

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param->uar_page_sz + 12;
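
	/* uar_page_sz is the firmware's log2(UAR page size) - 12 encoding,
	 * so a value of 0 means 4KB UAR pages (uar_page_shift == 12).
	 */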
aborting\n"); 1026 goto free_mem; 1027 } 1028 1029 if (dev->caps.uar_page_size * (dev->caps.num_uars - 1030 dev->caps.reserved_uars) > 1031 pci_resource_len(dev->persist->pdev, 1032 2)) { 1033 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 1034 dev->caps.uar_page_size * dev->caps.num_uars, 1035 (unsigned long long) 1036 pci_resource_len(dev->persist->pdev, 2)); 1037 err = -ENOMEM; 1038 goto err_mem; 1039 } 1040 1041 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 1042 dev->caps.eqe_size = 64; 1043 dev->caps.eqe_factor = 1; 1044 } else { 1045 dev->caps.eqe_size = 32; 1046 dev->caps.eqe_factor = 0; 1047 } 1048 1049 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 1050 dev->caps.cqe_size = 64; 1051 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1052 } else { 1053 dev->caps.cqe_size = 32; 1054 } 1055 1056 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 1057 dev->caps.eqe_size = hca_param->eqe_size; 1058 dev->caps.eqe_factor = 0; 1059 } 1060 1061 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 1062 dev->caps.cqe_size = hca_param->cqe_size; 1063 /* User still need to know when CQE > 32B */ 1064 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1065 } 1066 1067 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1068 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 1069 1070 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN; 1071 mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n"); 1072 1073 slave_adjust_steering_mode(dev, dev_cap, hca_param); 1074 mlx4_dbg(dev, "RSS support for IP fragments is %s\n", 1075 hca_param->rss_ip_frags ? "on" : "off"); 1076 1077 if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 1078 dev->caps.bf_reg_size) 1079 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 1080 1081 if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 1082 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 1083 1084 err_mem: 1085 if (err) 1086 mlx4_slave_destroy_special_qp_cap(dev); 1087 free_mem: 1088 kfree(hca_param); 1089 kfree(func_cap); 1090 kfree(dev_cap); 1091 return err; 1092 } 1093 1094 static void mlx4_request_modules(struct mlx4_dev *dev) 1095 { 1096 int port; 1097 int has_ib_port = false; 1098 int has_eth_port = false; 1099 #define EN_DRV_NAME "mlx4_en" 1100 #define IB_DRV_NAME "mlx4_ib" 1101 1102 for (port = 1; port <= dev->caps.num_ports; port++) { 1103 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 1104 has_ib_port = true; 1105 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 1106 has_eth_port = true; 1107 } 1108 1109 if (has_eth_port) 1110 request_module_nowait(EN_DRV_NAME); 1111 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 1112 request_module_nowait(IB_DRV_NAME); 1113 } 1114 1115 /* 1116 * Change the port configuration of the device. 1117 * Every user of this function must hold the port mutex. 

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static int __set_port_type(struct mlx4_port_info *info,
			   enum mlx4_port_type port_type)
{
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
		mlx4_err(mdev,
			 "Requested port type for port %d is not supported on this HCA\n",
			 info->port);
		return -EOPNOTSUPP;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	info->tmp_type = port_type;

	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EOPNOTSUPP;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);

	return err;
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	enum mlx4_port_type port_type;
	static DEFINE_MUTEX(set_port_type_mutex);
	int err;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n")) {
		port_type = MLX4_PORT_TYPE_IB;
	} else if (!strcmp(buf, "eth\n")) {
		port_type = MLX4_PORT_TYPE_ETH;
	} else if (!strcmp(buf, "auto\n")) {
		port_type = MLX4_PORT_TYPE_AUTO;
	} else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	err = __set_port_type(info, port_type);

err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}
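
/* Usage sketch (illustrative; the exact sysfs path depends on how the
 * attribute is registered elsewhere in this driver, commonly
 * /sys/bus/pci/devices/<bdf>/mlx4_portN):
 *   echo eth > /sys/bus/pci/devices/0000:03:00.0/mlx4_port1
 * switches port 1 to Ethernet; "ib" and "auto" are the other accepted
 * values, matching the strcmp() checks above.
 */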

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);

	/* only single port vfs are allowed */
	if (bitmap_weight_and(slaves_port1.slaves, slaves_port2.slaves,
			      dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}

static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		ret = ret1;
	}
	return ret;
}

static int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}

static int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}

static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides boundary checks cross mapping makes
		 * no sense and therefore not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}

struct mlx4_bond {
	struct work_struct work;
	struct mlx4_dev *dev;
	int is_bonded;
	struct mlx4_port_map port_map;
};

static void mlx4_bond_work(struct work_struct *work)
{
	struct mlx4_bond *bond = container_of(work, struct mlx4_bond, work);
	int err = 0;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(bond->dev)) {
			err = mlx4_bond(bond->dev);
			if (err)
				mlx4_err(bond->dev, "Fail to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(bond->dev, &bond->port_map);
			if (err)
				mlx4_err(bond->dev,
					 "Fail to set port map [%d][%d]: %d\n",
					 bond->port_map.port1,
					 bond->port_map.port2, err);
		}
	} else if (mlx4_is_bonded(bond->dev)) {
		err = mlx4_unbond(bond->dev);
		if (err)
			mlx4_err(bond->dev, "Fail to unbond device\n");
	}
	put_device(&bond->dev->persist->pdev->dev);
	kfree(bond);
}

int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
			 u8 v2p_p2)
{
	struct mlx4_bond *bond;

	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_bond_work);
	get_device(&dev->persist->pdev->dev);
	bond->dev = dev;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	queue_work(mlx4_wq, &bond->work);
	return 0;
}
EXPORT_SYMBOL(mlx4_queue_bond_work);
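
/* Usage sketch (illustrative): a consumer reacting to a bonding event can
 * call this from atomic context, since the allocation above uses
 * GFP_ATOMIC. For example,
 *   mlx4_queue_bond_work(dev, 1, 1, 2);
 * queues a bond with virtual port 1 mapped to physical port 1 and virtual
 * port 2 to physical port 2; the pci_dev reference taken here is dropped
 * in mlx4_bond_work() once the request has been processed.
 */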

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
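
/* Layout note (derived from the offsets above): each cMPT type (QP, SRQ,
 * CQ, EQ) gets its own window inside the cmpt_base region, spaced
 * cmpt_entry_sz << MLX4_CMPT_SHIFT bytes apart, i.e. room for
 * 2^MLX4_CMPT_SHIFT entries per type regardless of how many are mapped.
 */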

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}


	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
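
	/* Worked example (illustrative numbers): with a 64-byte MTT segment,
	 * a 128-byte cache alignment and 3 reserved segments, 3 * 64 == 192
	 * bytes round up to 256, so reserved_mtts becomes 256 / 64 == 4.
	 */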
1838 */ 1839 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1840 init_hca->mc_base, 1841 mlx4_get_mgm_entry_size(dev), 1842 dev->caps.num_mgms + dev->caps.num_amgms, 1843 dev->caps.num_mgms + dev->caps.num_amgms, 1844 0, 0); 1845 if (err) { 1846 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); 1847 goto err_unmap_srq; 1848 } 1849 1850 return 0; 1851 1852 err_unmap_srq: 1853 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1854 1855 err_unmap_cq: 1856 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1857 1858 err_unmap_rdmarc: 1859 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1860 1861 err_unmap_altc: 1862 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1863 1864 err_unmap_auxc: 1865 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1866 1867 err_unmap_qp: 1868 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1869 1870 err_unmap_dmpt: 1871 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1872 1873 err_unmap_mtt: 1874 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1875 1876 err_unmap_eq: 1877 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1878 1879 err_unmap_cmpt: 1880 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1881 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1882 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1883 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1884 1885 err_unmap_aux: 1886 mlx4_UNMAP_ICM_AUX(dev); 1887 1888 err_free_aux: 1889 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1890 1891 return err; 1892 } 1893 1894 static void mlx4_free_icms(struct mlx4_dev *dev) 1895 { 1896 struct mlx4_priv *priv = mlx4_priv(dev); 1897 1898 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1899 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1900 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1901 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1902 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1903 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1904 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1905 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1906 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1907 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1908 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1909 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1910 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1911 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1912 1913 mlx4_UNMAP_ICM_AUX(dev); 1914 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1915 } 1916 1917 static void mlx4_slave_exit(struct mlx4_dev *dev) 1918 { 1919 struct mlx4_priv *priv = mlx4_priv(dev); 1920 1921 mutex_lock(&priv->cmd.slave_cmd_mutex); 1922 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 1923 MLX4_COMM_TIME)) 1924 mlx4_warn(dev, "Failed to close slave function\n"); 1925 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1926 } 1927 1928 static int map_bf_area(struct mlx4_dev *dev) 1929 { 1930 struct mlx4_priv *priv = mlx4_priv(dev); 1931 resource_size_t bf_start; 1932 resource_size_t bf_len; 1933 int err = 0; 1934 1935 if (!dev->caps.bf_reg_size) 1936 return -ENXIO; 1937 1938 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1939 (dev->caps.num_uars << PAGE_SHIFT); 1940 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1941 (dev->caps.num_uars << PAGE_SHIFT); 1942 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1943 if (!priv->bf_mapping) 1944 err = -ENOMEM; 
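/* On success, priv->bf_mapping now provides a write-combining mapping of the BlueFlame registers, which live in BAR 2 just above the num_uars UAR pages computed above. */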
1945 1946 return err; 1947 } 1948 1949 static void unmap_bf_area(struct mlx4_dev *dev) 1950 { 1951 if (mlx4_priv(dev)->bf_mapping) 1952 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1953 } 1954 1955 u64 mlx4_read_clock(struct mlx4_dev *dev) 1956 { 1957 u32 clockhi, clocklo, clockhi1; 1958 u64 cycles; 1959 int i; 1960 struct mlx4_priv *priv = mlx4_priv(dev); 1961 1962 for (i = 0; i < 10; i++) { 1963 clockhi = swab32(readl(priv->clock_mapping)); 1964 clocklo = swab32(readl(priv->clock_mapping + 4)); 1965 clockhi1 = swab32(readl(priv->clock_mapping)); 1966 if (clockhi == clockhi1) 1967 break; 1968 } 1969 1970 cycles = (u64) clockhi << 32 | (u64) clocklo; 1971 1972 return cycles; 1973 } 1974 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1975 1976 1977 static int map_internal_clock(struct mlx4_dev *dev) 1978 { 1979 struct mlx4_priv *priv = mlx4_priv(dev); 1980 1981 priv->clock_mapping = 1982 ioremap(pci_resource_start(dev->persist->pdev, 1983 priv->fw.clock_bar) + 1984 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1985 1986 if (!priv->clock_mapping) 1987 return -ENOMEM; 1988 1989 return 0; 1990 } 1991 1992 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1993 struct mlx4_clock_params *params) 1994 { 1995 struct mlx4_priv *priv = mlx4_priv(dev); 1996 1997 if (mlx4_is_slave(dev)) 1998 return -EOPNOTSUPP; 1999 2000 if (!dev->caps.map_clock_to_user) { 2001 mlx4_dbg(dev, "Map clock to user is not supported.\n"); 2002 return -EOPNOTSUPP; 2003 } 2004 2005 if (!params) 2006 return -EINVAL; 2007 2008 params->bar = priv->fw.clock_bar; 2009 params->offset = priv->fw.clock_offset; 2010 params->size = MLX4_CLOCK_SIZE; 2011 2012 return 0; 2013 } 2014 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); 2015 2016 static void unmap_internal_clock(struct mlx4_dev *dev) 2017 { 2018 struct mlx4_priv *priv = mlx4_priv(dev); 2019 2020 if (priv->clock_mapping) 2021 iounmap(priv->clock_mapping); 2022 } 2023 2024 static void mlx4_close_hca(struct mlx4_dev *dev) 2025 { 2026 unmap_internal_clock(dev); 2027 unmap_bf_area(dev); 2028 if (mlx4_is_slave(dev)) 2029 mlx4_slave_exit(dev); 2030 else { 2031 mlx4_CLOSE_HCA(dev, 0); 2032 mlx4_free_icms(dev); 2033 } 2034 } 2035 2036 static void mlx4_close_fw(struct mlx4_dev *dev) 2037 { 2038 if (!mlx4_is_slave(dev)) { 2039 mlx4_UNMAP_FA(dev); 2040 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 2041 } 2042 } 2043 2044 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 2045 { 2046 #define COMM_CHAN_OFFLINE_OFFSET 0x09 2047 2048 u32 comm_flags; 2049 u32 offline_bit; 2050 unsigned long end; 2051 struct mlx4_priv *priv = mlx4_priv(dev); 2052 2053 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 2054 while (time_before(jiffies, end)) { 2055 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 2056 MLX4_COMM_CHAN_FLAGS)); 2057 offline_bit = (comm_flags & 2058 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 2059 if (!offline_bit) 2060 return 0; 2061 2062 /* If device removal has been requested, 2063 * do not continue retrying. 2064 */ 2065 if (dev->persist->interface_state & 2066 MLX4_INTERFACE_STATE_NOWAIT) 2067 break; 2068 2069 /* There are cases as part of AER/Reset flow that PF needs 2070 * around 100 msec to load. We therefore sleep for 100 msec 2071 * to allow other tasks to make use of that CPU during this 2072 * time interval. 
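* The enclosing loop keeps re-reading the offline bit until MLX4_COMM_OFFLINE_TIME_OUT elapses, so the channel gets the full window to come back online.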
2073 */ 2074 msleep(100); 2075 } 2076 mlx4_err(dev, "Communication channel is offline.\n"); 2077 return -EIO; 2078 } 2079 2080 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 2081 { 2082 #define COMM_CHAN_RST_OFFSET 0x1e 2083 2084 struct mlx4_priv *priv = mlx4_priv(dev); 2085 u32 comm_rst; 2086 u32 comm_caps; 2087 2088 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 2089 MLX4_COMM_CHAN_CAPS)); 2090 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 2091 2092 if (comm_rst) 2093 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 2094 } 2095 2096 static int mlx4_init_slave(struct mlx4_dev *dev) 2097 { 2098 struct mlx4_priv *priv = mlx4_priv(dev); 2099 u64 dma = (u64) priv->mfunc.vhcr_dma; 2100 int ret_from_reset = 0; 2101 u32 slave_read; 2102 u32 cmd_channel_ver; 2103 2104 if (atomic_read(&pf_loading)) { 2105 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 2106 return -EPROBE_DEFER; 2107 } 2108 2109 mutex_lock(&priv->cmd.slave_cmd_mutex); 2110 priv->cmd.max_cmds = 1; 2111 if (mlx4_comm_check_offline(dev)) { 2112 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 2113 goto err_offline; 2114 } 2115 2116 mlx4_reset_vf_support(dev); 2117 mlx4_warn(dev, "Sending reset\n"); 2118 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 2119 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 2120 /* if we are in the middle of flr the slave will try 2121 * NUM_OF_RESET_RETRIES times before leaving.*/ 2122 if (ret_from_reset) { 2123 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 2124 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 2125 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2126 return -EPROBE_DEFER; 2127 } else 2128 goto err; 2129 } 2130 2131 /* check the driver version - the slave I/F revision 2132 * must match the master's */ 2133 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 2134 cmd_channel_ver = mlx4_comm_get_version(); 2135 2136 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 2137 MLX4_COMM_GET_IF_REV(slave_read)) { 2138 mlx4_err(dev, "slave driver version is not supported by the master\n"); 2139 goto err; 2140 } 2141 2142 mlx4_warn(dev, "Sending vhcr0\n"); 2143 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 2144 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2145 goto err; 2146 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 2147 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2148 goto err; 2149 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 2150 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2151 goto err; 2152 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 2153 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2154 goto err; 2155 2156 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2157 return 0; 2158 2159 err: 2160 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 2161 err_offline: 2162 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2163 return -EIO; 2164 } 2165 2166 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 2167 { 2168 int i; 2169 2170 for (i = 1; i <= dev->caps.num_ports; i++) { 2171 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 2172 dev->caps.gid_table_len[i] = 2173 mlx4_get_slave_num_gids(dev, 0, i); 2174 else 2175 dev->caps.gid_table_len[i] = 1; 2176 dev->caps.pkey_table_len[i] = 2177 dev->phys_caps.pkey_phys_table_len[i] - 1; 2178 } 2179 } 2180 2181 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 2182 { 2183 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 2184 2185 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 2186 i++) { 2187 if (qp_per_entry <= 4 * ((1 << i) / 16 - 
2)) 2188 break; 2189 } 2190 2191 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 2192 } 2193 2194 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 2195 { 2196 switch (dmfs_high_steer_mode) { 2197 case MLX4_STEERING_DMFS_A0_DEFAULT: 2198 return "default performance"; 2199 2200 case MLX4_STEERING_DMFS_A0_DYNAMIC: 2201 return "dynamic hybrid mode"; 2202 2203 case MLX4_STEERING_DMFS_A0_STATIC: 2204 return "performance optimized for limited rule configuration (static)"; 2205 2206 case MLX4_STEERING_DMFS_A0_DISABLE: 2207 return "disabled performance optimized steering"; 2208 2209 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 2210 return "performance optimized steering not supported"; 2211 2212 default: 2213 return "Unrecognized mode"; 2214 } 2215 } 2216 2217 #define MLX4_DMFS_A0_STEERING (1UL << 2) 2218 2219 static void choose_steering_mode(struct mlx4_dev *dev, 2220 struct mlx4_dev_cap *dev_cap) 2221 { 2222 if (mlx4_log_num_mgm_entry_size <= 0) { 2223 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 2224 if (dev->caps.dmfs_high_steer_mode == 2225 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2226 mlx4_err(dev, "DMFS high rate mode not supported\n"); 2227 else 2228 dev->caps.dmfs_high_steer_mode = 2229 MLX4_STEERING_DMFS_A0_STATIC; 2230 } 2231 } 2232 2233 if (mlx4_log_num_mgm_entry_size <= 0 && 2234 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 2235 (!mlx4_is_mfunc(dev) || 2236 (dev_cap->fs_max_num_qp_per_entry >= 2237 (dev->persist->num_vfs + 1))) && 2238 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 2239 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 2240 dev->oper_log_mgm_entry_size = 2241 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 2242 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2243 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 2244 dev->caps.fs_log_max_ucast_qp_range_size = 2245 dev_cap->fs_log_max_ucast_qp_range_size; 2246 } else { 2247 if (dev->caps.dmfs_high_steer_mode != 2248 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2249 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 2250 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 2251 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2252 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 2253 else { 2254 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 2255 2256 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 2257 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2258 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 2259 } 2260 dev->oper_log_mgm_entry_size = 2261 mlx4_log_num_mgm_entry_size > 0 ? 
2262 mlx4_log_num_mgm_entry_size : 2263 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 2264 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 2265 } 2266 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 2267 mlx4_steering_mode_str(dev->caps.steering_mode), 2268 dev->oper_log_mgm_entry_size, 2269 mlx4_log_num_mgm_entry_size); 2270 } 2271 2272 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 2273 struct mlx4_dev_cap *dev_cap) 2274 { 2275 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 2276 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 2277 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 2278 else 2279 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 2280 2281 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 2282 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 2283 } 2284 2285 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 2286 { 2287 int i; 2288 struct mlx4_port_cap port_cap; 2289 2290 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2291 return -EINVAL; 2292 2293 for (i = 1; i <= dev->caps.num_ports; i++) { 2294 if (mlx4_dev_port(dev, i, &port_cap)) { 2295 mlx4_err(dev, 2296 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); 2297 } else if ((dev->caps.dmfs_high_steer_mode != 2298 MLX4_STEERING_DMFS_A0_DEFAULT) && 2299 (port_cap.dmfs_optimized_state == 2300 !!(dev->caps.dmfs_high_steer_mode == 2301 MLX4_STEERING_DMFS_A0_DISABLE))) { 2302 mlx4_err(dev, 2303 "DMFS high rate steer mode differs, driver requested %s but %s in FW.\n", 2304 dmfs_high_rate_steering_mode_str( 2305 dev->caps.dmfs_high_steer_mode), 2306 (port_cap.dmfs_optimized_state ?
2307 "enabled" : "disabled")); 2308 } 2309 } 2310 2311 return 0; 2312 } 2313 2314 static int mlx4_init_fw(struct mlx4_dev *dev) 2315 { 2316 struct mlx4_mod_stat_cfg mlx4_cfg; 2317 int err = 0; 2318 2319 if (!mlx4_is_slave(dev)) { 2320 err = mlx4_QUERY_FW(dev); 2321 if (err) { 2322 if (err == -EACCES) 2323 mlx4_info(dev, "non-primary physical function, skipping\n"); 2324 else 2325 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 2326 return err; 2327 } 2328 2329 err = mlx4_load_fw(dev); 2330 if (err) { 2331 mlx4_err(dev, "Failed to start FW, aborting\n"); 2332 return err; 2333 } 2334 2335 mlx4_cfg.log_pg_sz_m = 1; 2336 mlx4_cfg.log_pg_sz = 0; 2337 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 2338 if (err) 2339 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 2340 } 2341 2342 return err; 2343 } 2344 2345 static int mlx4_init_hca(struct mlx4_dev *dev) 2346 { 2347 struct mlx4_priv *priv = mlx4_priv(dev); 2348 struct mlx4_init_hca_param *init_hca = NULL; 2349 struct mlx4_dev_cap *dev_cap = NULL; 2350 struct mlx4_adapter adapter; 2351 struct mlx4_profile profile; 2352 u64 icm_size; 2353 struct mlx4_config_dev_params params; 2354 int err; 2355 2356 if (!mlx4_is_slave(dev)) { 2357 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 2358 init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL); 2359 2360 if (!dev_cap || !init_hca) { 2361 err = -ENOMEM; 2362 goto out_free; 2363 } 2364 2365 err = mlx4_dev_cap(dev, dev_cap); 2366 if (err) { 2367 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 2368 goto out_free; 2369 } 2370 2371 choose_steering_mode(dev, dev_cap); 2372 choose_tunnel_offload_mode(dev, dev_cap); 2373 2374 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 2375 mlx4_is_master(dev)) 2376 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 2377 2378 err = mlx4_get_phys_port_id(dev); 2379 if (err) 2380 mlx4_err(dev, "Fail to get physical port id\n"); 2381 2382 if (mlx4_is_master(dev)) 2383 mlx4_parav_master_pf_caps(dev); 2384 2385 if (mlx4_low_memory_profile()) { 2386 mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); 2387 profile = low_mem_profile; 2388 } else { 2389 profile = default_profile; 2390 } 2391 if (dev->caps.steering_mode == 2392 MLX4_STEERING_MODE_DEVICE_MANAGED) 2393 profile.num_mcg = MLX4_FS_NUM_MCG; 2394 2395 icm_size = mlx4_make_profile(dev, &profile, dev_cap, 2396 init_hca); 2397 if ((long long) icm_size < 0) { 2398 err = icm_size; 2399 goto out_free; 2400 } 2401 2402 if (enable_4k_uar || !dev->persist->num_vfs) { 2403 init_hca->log_uar_sz = ilog2(dev->caps.num_uars) + 2404 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; 2405 init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; 2406 } else { 2407 init_hca->log_uar_sz = ilog2(dev->caps.num_uars); 2408 init_hca->uar_page_sz = PAGE_SHIFT - 12; 2409 } 2410 2411 init_hca->mw_enabled = 0; 2412 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2413 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2414 init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2415 2416 err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size); 2417 if (err) 2418 goto out_free; 2419 2420 err = mlx4_INIT_HCA(dev, init_hca); 2421 if (err) { 2422 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2423 goto err_free_icm; 2424 } 2425 2426 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2427 err = mlx4_query_func(dev, dev_cap); 2428 if (err < 0) { 2429 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2430 goto err_close; 2431 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2432 dev->caps.num_eqs = dev_cap->max_eqs; 2433 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 2434 dev->caps.reserved_uars = dev_cap->reserved_uars; 2435 } 2436 } 2437 2438 /* 2439 * If TS is supported by FW 2440 * read HCA frequency by QUERY_HCA command 2441 */ 2442 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2443 err = mlx4_QUERY_HCA(dev, init_hca); 2444 if (err) { 2445 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2446 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2447 } else { 2448 dev->caps.hca_core_clock = 2449 init_hca->hca_core_clock; 2450 } 2451 2452 /* In case we got HCA frequency 0 - disable timestamping 2453 * to avoid dividing by zero 2454 */ 2455 if (!dev->caps.hca_core_clock) { 2456 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2457 mlx4_err(dev, 2458 "HCA frequency is 0 - timestamping is not supported\n"); 2459 } else if (map_internal_clock(dev)) { 2460 /* 2461 * Map internal clock, 2462 * in case of failure disable timestamping 2463 */ 2464 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2465 mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); 2466 } 2467 } 2468 2469 if (dev->caps.dmfs_high_steer_mode != 2470 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2471 if (mlx4_validate_optimized_steering(dev)) 2472 mlx4_warn(dev, "Optimized steering validation failed\n"); 2473 2474 if (dev->caps.dmfs_high_steer_mode == 2475 MLX4_STEERING_DMFS_A0_DISABLE) { 2476 dev->caps.dmfs_high_rate_qpn_base = 2477 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2478 dev->caps.dmfs_high_rate_qpn_range = 2479 MLX4_A0_STEERING_TABLE_SIZE; 2480 } 2481 2482 mlx4_info(dev, "DMFS high rate steer mode is: %s\n", 2483 dmfs_high_rate_steering_mode_str( 2484 dev->caps.dmfs_high_steer_mode)); 2485 } 2486 } else { 2487 err = mlx4_init_slave(dev); 2488 if (err) { 2489 if (err != -EPROBE_DEFER) 2490 mlx4_err(dev, "Failed to initialize slave\n"); 2491 return err; 2492 } 2493 2494 err = mlx4_slave_cap(dev); 2495 if (err) { 2496 mlx4_err(dev, "Failed to obtain slave caps\n"); 2497 goto err_close; 2498 } 2499 } 2500 2501 if (map_bf_area(dev)) 2502 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2503 2504 /* Only the master sets the ports; all the rest get it from it. */ 2505 if (!mlx4_is_slave(dev)) 2506 mlx4_set_port_mask(dev); 2507 2508 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2509 if (err) { 2510 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2511 goto unmap_bf; 2512 } 2513 2514 /* Query CONFIG_DEV parameters */ 2515 err = mlx4_config_dev_retrieval(dev, &params); 2516 if (err && err != -EOPNOTSUPP) { 2517 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2518 } else if (!err) { 2519 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2520 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2521 } 2522 priv->eq_table.inta_pin = adapter.inta_pin; 2523 memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id)); 2524 2525 err = 0; 2526 goto out_free; 2527 2528 unmap_bf: 2529 unmap_internal_clock(dev); 2530 unmap_bf_area(dev); 2531 2532 if (mlx4_is_slave(dev)) 2533 mlx4_slave_destroy_special_qp_cap(dev); 2534 2535 err_close: 2536 if (mlx4_is_slave(dev)) 2537 mlx4_slave_exit(dev); 2538 else 2539 mlx4_CLOSE_HCA(dev, 0); 2540 2541 err_free_icm: 2542 if (!mlx4_is_slave(dev)) 2543 mlx4_free_icms(dev); 2544 2545 out_free: 2546 kfree(dev_cap); 2547 kfree(init_hca); 2548 2549 return err; 2550 } 2551 2552 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2553 { 2554 struct mlx4_priv *priv = mlx4_priv(dev); 2555 int nent_pow2; 2556 2557 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2558 return -ENOENT; 2559 2560 if (!dev->caps.max_counters) 2561 return -ENOSPC; 2562 2563 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); 2564 /* reserve last counter index for sink counter */ 2565 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, 2566 nent_pow2 - 1, 0, 2567 nent_pow2 - dev->caps.max_counters + 1); 2568 } 2569 2570 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2571 { 2572 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2573 return; 2574 2575 if (!dev->caps.max_counters) 2576 return; 2577 2578 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2579 } 2580 2581 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) 2582 { 2583 struct mlx4_priv *priv = mlx4_priv(dev); 2584 int port; 2585 2586 for (port = 0; port < dev->caps.num_ports; port++) 2587 if (priv->def_counter[port] != -1) 2588 mlx4_counter_free(dev, priv->def_counter[port]); 2589 } 2590 2591 static int mlx4_allocate_default_counters(struct mlx4_dev *dev) 2592 { 2593 struct mlx4_priv *priv 
= mlx4_priv(dev); 2594 int port, err = 0; 2595 u32 idx; 2596 2597 for (port = 0; port < dev->caps.num_ports; port++) 2598 priv->def_counter[port] = -1; 2599 2600 for (port = 0; port < dev->caps.num_ports; port++) { 2601 err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER); 2602 2603 if (!err || err == -ENOSPC) { 2604 priv->def_counter[port] = idx; 2605 err = 0; 2606 } else if (err == -ENOENT) { 2607 err = 0; 2608 continue; 2609 } else if (mlx4_is_slave(dev) && err == -EINVAL) { 2610 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); 2611 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", 2612 MLX4_SINK_COUNTER_INDEX(dev)); 2613 err = 0; 2614 } else { 2615 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2616 __func__, port + 1, err); 2617 mlx4_cleanup_default_counters(dev); 2618 return err; 2619 } 2620 2621 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", 2622 __func__, priv->def_counter[port], port + 1); 2623 } 2624 2625 return err; 2626 } 2627 2628 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2629 { 2630 struct mlx4_priv *priv = mlx4_priv(dev); 2631 2632 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2633 return -ENOENT; 2634 2635 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2636 if (*idx == -1) { 2637 *idx = MLX4_SINK_COUNTER_INDEX(dev); 2638 return -ENOSPC; 2639 } 2640 2641 return 0; 2642 } 2643 2644 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) 2645 { 2646 u32 in_modifier = RES_COUNTER | (((u32)usage & 3) << 30); 2647 u64 out_param; 2648 int err; 2649 2650 if (mlx4_is_mfunc(dev)) { 2651 err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier, 2652 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2653 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2654 if (!err) 2655 *idx = get_param_l(&out_param); 2656 if (WARN_ON(err == -ENOSPC)) 2657 err = -EINVAL; 2658 return err; 2659 } 2660 return __mlx4_counter_alloc(dev, idx); 2661 } 2662 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2663 2664 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, 2665 u8 counter_index) 2666 { 2667 struct mlx4_cmd_mailbox *if_stat_mailbox; 2668 int err; 2669 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; 2670 2671 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); 2672 if (IS_ERR(if_stat_mailbox)) 2673 return PTR_ERR(if_stat_mailbox); 2674 2675 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, 2676 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, 2677 MLX4_CMD_NATIVE); 2678 2679 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); 2680 return err; 2681 } 2682 2683 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2684 { 2685 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2686 return; 2687 2688 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) 2689 return; 2690 2691 __mlx4_clear_if_stat(dev, idx); 2692 2693 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2694 return; 2695 } 2696 2697 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2698 { 2699 u64 in_param = 0; 2700 2701 if (mlx4_is_mfunc(dev)) { 2702 set_param_l(&in_param, idx); 2703 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2704 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2705 MLX4_CMD_WRAPPED); 2706 return; 2707 } 2708 __mlx4_counter_free(dev, idx); 2709 } 2710 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2711 2712 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) 2713 { 2714 struct mlx4_priv *priv = mlx4_priv(dev); 2715 2716 return priv->def_counter[port - 1]; 2717 } 2718 
EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); 2719 2720 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) 2721 { 2722 struct mlx4_priv *priv = mlx4_priv(dev); 2723 2724 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2725 } 2726 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); 2727 2728 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) 2729 { 2730 struct mlx4_priv *priv = mlx4_priv(dev); 2731 2732 return priv->mfunc.master.vf_admin[entry].vport[port].guid; 2733 } 2734 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); 2735 2736 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) 2737 { 2738 struct mlx4_priv *priv = mlx4_priv(dev); 2739 __be64 guid; 2740 2741 /* hw GUID */ 2742 if (entry == 0) 2743 return; 2744 2745 get_random_bytes((char *)&guid, sizeof(guid)); 2746 guid &= ~(cpu_to_be64(1ULL << 56)); 2747 guid |= cpu_to_be64(1ULL << 57); 2748 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2749 } 2750 2751 static int mlx4_setup_hca(struct mlx4_dev *dev) 2752 { 2753 struct mlx4_priv *priv = mlx4_priv(dev); 2754 int err; 2755 int port; 2756 __be32 ib_port_default_caps; 2757 2758 err = mlx4_init_uar_table(dev); 2759 if (err) { 2760 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2761 return err; 2762 } 2763 2764 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2765 if (err) { 2766 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2767 goto err_uar_table_free; 2768 } 2769 2770 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2771 if (!priv->kar) { 2772 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2773 err = -ENOMEM; 2774 goto err_uar_free; 2775 } 2776 2777 err = mlx4_init_pd_table(dev); 2778 if (err) { 2779 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2780 goto err_kar_unmap; 2781 } 2782 2783 err = mlx4_init_xrcd_table(dev); 2784 if (err) { 2785 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2786 goto err_pd_table_free; 2787 } 2788 2789 err = mlx4_init_mr_table(dev); 2790 if (err) { 2791 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2792 goto err_xrcd_table_free; 2793 } 2794 2795 if (!mlx4_is_slave(dev)) { 2796 err = mlx4_init_mcg_table(dev); 2797 if (err) { 2798 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2799 goto err_mr_table_free; 2800 } 2801 err = mlx4_config_mad_demux(dev); 2802 if (err) { 2803 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2804 goto err_mcg_table_free; 2805 } 2806 } 2807 2808 err = mlx4_init_eq_table(dev); 2809 if (err) { 2810 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2811 goto err_mcg_table_free; 2812 } 2813 2814 err = mlx4_cmd_use_events(dev); 2815 if (err) { 2816 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2817 goto err_eq_table_free; 2818 } 2819 2820 err = mlx4_NOP(dev); 2821 if (err) { 2822 if (dev->flags & MLX4_FLAG_MSI_X) { 2823 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n", 2824 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2825 mlx4_warn(dev, "Trying again without MSI-X\n"); 2826 } else { 2827 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2828 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2829 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2830 } 2831 2832 goto err_cmd_poll; 2833 } 2834 2835 mlx4_dbg(dev, "NOP command IRQ test 
passed\n"); 2836 2837 err = mlx4_init_cq_table(dev); 2838 if (err) { 2839 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2840 goto err_cmd_poll; 2841 } 2842 2843 err = mlx4_init_srq_table(dev); 2844 if (err) { 2845 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2846 goto err_cq_table_free; 2847 } 2848 2849 err = mlx4_init_qp_table(dev); 2850 if (err) { 2851 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2852 goto err_srq_table_free; 2853 } 2854 2855 if (!mlx4_is_slave(dev)) { 2856 err = mlx4_init_counters_table(dev); 2857 if (err && err != -ENOENT) { 2858 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2859 goto err_qp_table_free; 2860 } 2861 } 2862 2863 err = mlx4_allocate_default_counters(dev); 2864 if (err) { 2865 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); 2866 goto err_counters_table_free; 2867 } 2868 2869 if (!mlx4_is_slave(dev)) { 2870 for (port = 1; port <= dev->caps.num_ports; port++) { 2871 ib_port_default_caps = 0; 2872 err = mlx4_get_port_ib_caps(dev, port, 2873 &ib_port_default_caps); 2874 if (err) 2875 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2876 port, err); 2877 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2878 2879 /* initialize per-slave default ib port capabilities */ 2880 if (mlx4_is_master(dev)) { 2881 int i; 2882 for (i = 0; i < dev->num_slaves; i++) { 2883 if (i == mlx4_master_func_num(dev)) 2884 continue; 2885 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2886 ib_port_default_caps; 2887 } 2888 } 2889 2890 if (mlx4_is_mfunc(dev)) 2891 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2892 else 2893 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2894 2895 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 
2896 dev->caps.pkey_table_len[port] : -1); 2897 if (err) { 2898 mlx4_err(dev, "Failed to set port %d, aborting\n", 2899 port); 2900 goto err_default_counters_free; 2901 } 2902 } 2903 } 2904 2905 return 0; 2906 2907 err_default_counters_free: 2908 mlx4_cleanup_default_counters(dev); 2909 2910 err_counters_table_free: 2911 if (!mlx4_is_slave(dev)) 2912 mlx4_cleanup_counters_table(dev); 2913 2914 err_qp_table_free: 2915 mlx4_cleanup_qp_table(dev); 2916 2917 err_srq_table_free: 2918 mlx4_cleanup_srq_table(dev); 2919 2920 err_cq_table_free: 2921 mlx4_cleanup_cq_table(dev); 2922 2923 err_cmd_poll: 2924 mlx4_cmd_use_polling(dev); 2925 2926 err_eq_table_free: 2927 mlx4_cleanup_eq_table(dev); 2928 2929 err_mcg_table_free: 2930 if (!mlx4_is_slave(dev)) 2931 mlx4_cleanup_mcg_table(dev); 2932 2933 err_mr_table_free: 2934 mlx4_cleanup_mr_table(dev); 2935 2936 err_xrcd_table_free: 2937 mlx4_cleanup_xrcd_table(dev); 2938 2939 err_pd_table_free: 2940 mlx4_cleanup_pd_table(dev); 2941 2942 err_kar_unmap: 2943 iounmap(priv->kar); 2944 2945 err_uar_free: 2946 mlx4_uar_free(dev, &priv->driver_uar); 2947 2948 err_uar_table_free: 2949 mlx4_cleanup_uar_table(dev); 2950 return err; 2951 } 2952 2953 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) 2954 { 2955 int requested_cpu = 0; 2956 struct mlx4_priv *priv = mlx4_priv(dev); 2957 struct mlx4_eq *eq; 2958 int off = 0; 2959 int i; 2960 2961 if (eqn > dev->caps.num_comp_vectors) 2962 return -EINVAL; 2963 2964 for (i = 1; i < port; i++) 2965 off += mlx4_get_eqs_per_port(dev, i); 2966 2967 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); 2968 2969 /* Meaning EQs are shared, and this call comes from the second port */ 2970 if (requested_cpu < 0) 2971 return 0; 2972 2973 eq = &priv->eq_table.eq[eqn]; 2974 2975 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL)) 2976 return -ENOMEM; 2977 2978 cpumask_set_cpu(requested_cpu, eq->affinity_mask); 2979 2980 return 0; 2981 } 2982 2983 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2984 { 2985 struct mlx4_priv *priv = mlx4_priv(dev); 2986 struct msix_entry *entries; 2987 int i; 2988 int port = 0; 2989 2990 if (msi_x) { 2991 int nreq = min3(dev->caps.num_ports * 2992 (int)num_online_cpus() + 1, 2993 dev->caps.num_eqs - dev->caps.reserved_eqs, 2994 MAX_MSIX); 2995 2996 if (msi_x > 1) 2997 nreq = min_t(int, nreq, msi_x); 2998 2999 entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); 3000 if (!entries) 3001 goto no_msi; 3002 3003 for (i = 0; i < nreq; ++i) 3004 entries[i].entry = i; 3005 3006 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 3007 nreq); 3008 3009 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { 3010 kfree(entries); 3011 goto no_msi; 3012 } 3013 /* 1 is reserved for events (asynchronous EQ) */ 3014 dev->caps.num_comp_vectors = nreq - 1; 3015 3016 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; 3017 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 3018 dev->caps.num_ports); 3019 3020 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 3021 if (i == MLX4_EQ_ASYNC) 3022 continue; 3023 3024 priv->eq_table.eq[i].irq = 3025 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 3026 3027 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { 3028 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 3029 dev->caps.num_ports); 3030 /* We don't set affinity hint when there 3031 * aren't enough EQs 3032 */ 3033 } else { 3034 set_bit(port, 3035 priv->eq_table.eq[i].actv_ports.ports); 3036 if (mlx4_init_affinity_hint(dev, port + 1, i)) 3037 mlx4_warn(dev, "Couldn't init hint cpumask 
for EQ %d\n", 3038 i); 3039 } 3040 /* We divide the EQs evenly between the two ports. 3041 * (dev->caps.num_comp_vectors / dev->caps.num_ports) 3042 * refers to the number of EQs per port 3043 * (i.e. eqs_per_port). Theoretically, we would like to 3044 * write something like (i + 1) % eqs_per_port == 0. 3045 * However, since there's an asynchronous EQ, we have 3046 * to skip over it by comparing this condition to 3047 * !!((i + 1) > MLX4_EQ_ASYNC). For example, with two ports and eight completion vectors, eqs_per_port is four: completion EQs 1-4 end up bound to port 1 and EQs 5-8 to port 2. 3048 */ 3049 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && 3050 ((i + 1) % 3051 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == 3052 !!((i + 1) > MLX4_EQ_ASYNC)) 3053 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, 3054 * everything is shared anyway. 3055 */ 3056 port++; 3057 } 3058 3059 dev->flags |= MLX4_FLAG_MSI_X; 3060 3061 kfree(entries); 3062 return; 3063 } 3064 3065 no_msi: 3066 dev->caps.num_comp_vectors = 1; 3067 3068 BUG_ON(MLX4_EQ_ASYNC >= 2); 3069 for (i = 0; i < 2; ++i) { 3070 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 3071 if (i != MLX4_EQ_ASYNC) { 3072 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 3073 dev->caps.num_ports); 3074 } 3075 } 3076 } 3077 3078 static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, 3079 enum devlink_port_type port_type) 3080 { 3081 struct mlx4_port_info *info = container_of(devlink_port, 3082 struct mlx4_port_info, 3083 devlink_port); 3084 enum mlx4_port_type mlx4_port_type; 3085 3086 switch (port_type) { 3087 case DEVLINK_PORT_TYPE_AUTO: 3088 mlx4_port_type = MLX4_PORT_TYPE_AUTO; 3089 break; 3090 case DEVLINK_PORT_TYPE_ETH: 3091 mlx4_port_type = MLX4_PORT_TYPE_ETH; 3092 break; 3093 case DEVLINK_PORT_TYPE_IB: 3094 mlx4_port_type = MLX4_PORT_TYPE_IB; 3095 break; 3096 default: 3097 return -EOPNOTSUPP; 3098 } 3099 3100 return __set_port_type(info, mlx4_port_type); 3101 } 3102 3103 static const struct devlink_port_ops mlx4_devlink_port_ops = { 3104 .port_type_set = mlx4_devlink_port_type_set, 3105 }; 3106 3107 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 3108 { 3109 struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); 3110 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 3111 int err; 3112 3113 err = devl_port_register_with_ops(devlink, &info->devlink_port, port, 3114 &mlx4_devlink_port_ops); 3115 if (err) 3116 return err; 3117 3118 /* Ethernet and IB drivers will normally set the port type, 3119 * but if they are not built set the type now to prevent 3120 * devlink_port_type_warn() from firing. 
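* (devlink_port_type_warn() is devlink's watchdog; it complains about any registered port whose type is still unset after a grace period.)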
3121 */ 3122 if (!IS_ENABLED(CONFIG_MLX4_EN) && 3123 dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 3124 devlink_port_type_eth_set(&info->devlink_port); 3125 else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) && 3126 dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 3127 devlink_port_type_ib_set(&info->devlink_port, NULL); 3128 3129 info->dev = dev; 3130 info->port = port; 3131 if (!mlx4_is_slave(dev)) { 3132 mlx4_init_mac_table(dev, &info->mac_table); 3133 mlx4_init_vlan_table(dev, &info->vlan_table); 3134 mlx4_init_roce_gid_table(dev, &info->gid_table); 3135 info->base_qpn = mlx4_get_base_qpn(dev, port); 3136 } 3137 3138 sprintf(info->dev_name, "mlx4_port%d", port); 3139 info->port_attr.attr.name = info->dev_name; 3140 if (mlx4_is_mfunc(dev)) { 3141 info->port_attr.attr.mode = 0444; 3142 } else { 3143 info->port_attr.attr.mode = 0644; 3144 info->port_attr.store = set_port_type; 3145 } 3146 info->port_attr.show = show_port_type; 3147 sysfs_attr_init(&info->port_attr.attr); 3148 3149 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 3150 if (err) { 3151 mlx4_err(dev, "Failed to create file for port %d\n", port); 3152 devlink_port_type_clear(&info->devlink_port); 3153 devl_port_unregister(&info->devlink_port); 3154 info->port = -1; 3155 return err; 3156 } 3157 3158 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 3159 info->port_mtu_attr.attr.name = info->dev_mtu_name; 3160 if (mlx4_is_mfunc(dev)) { 3161 info->port_mtu_attr.attr.mode = 0444; 3162 } else { 3163 info->port_mtu_attr.attr.mode = 0644; 3164 info->port_mtu_attr.store = set_port_ib_mtu; 3165 } 3166 info->port_mtu_attr.show = show_port_ib_mtu; 3167 sysfs_attr_init(&info->port_mtu_attr.attr); 3168 3169 err = device_create_file(&dev->persist->pdev->dev, 3170 &info->port_mtu_attr); 3171 if (err) { 3172 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 3173 device_remove_file(&info->dev->persist->pdev->dev, 3174 &info->port_attr); 3175 devlink_port_type_clear(&info->devlink_port); 3176 devl_port_unregister(&info->devlink_port); 3177 info->port = -1; 3178 return err; 3179 } 3180 3181 return 0; 3182 } 3183 3184 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 3185 { 3186 if (info->port < 0) 3187 return; 3188 3189 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 3190 device_remove_file(&info->dev->persist->pdev->dev, 3191 &info->port_mtu_attr); 3192 devlink_port_type_clear(&info->devlink_port); 3193 devl_port_unregister(&info->devlink_port); 3194 3195 #ifdef CONFIG_RFS_ACCEL 3196 free_irq_cpu_rmap(info->rmap); 3197 info->rmap = NULL; 3198 #endif 3199 } 3200 3201 static int mlx4_init_steering(struct mlx4_dev *dev) 3202 { 3203 struct mlx4_priv *priv = mlx4_priv(dev); 3204 int num_entries = dev->caps.num_ports; 3205 int i, j; 3206 3207 priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer), 3208 GFP_KERNEL); 3209 if (!priv->steer) 3210 return -ENOMEM; 3211 3212 for (i = 0; i < num_entries; i++) 3213 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3214 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 3215 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 3216 } 3217 return 0; 3218 } 3219 3220 static void mlx4_clear_steering(struct mlx4_dev *dev) 3221 { 3222 struct mlx4_priv *priv = mlx4_priv(dev); 3223 struct mlx4_steer_index *entry, *tmp_entry; 3224 struct mlx4_promisc_qp *pqp, *tmp_pqp; 3225 int num_entries = dev->caps.num_ports; 3226 int i, j; 3227 3228 for (i = 0; i < num_entries; i++) { 3229 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3230 list_for_each_entry_safe(pqp, 
tmp_pqp, 3231 &priv->steer[i].promisc_qps[j], 3232 list) { 3233 list_del(&pqp->list); 3234 kfree(pqp); 3235 } 3236 list_for_each_entry_safe(entry, tmp_entry, 3237 &priv->steer[i].steer_entries[j], 3238 list) { 3239 list_del(&entry->list); 3240 list_for_each_entry_safe(pqp, tmp_pqp, 3241 &entry->duplicates, 3242 list) { 3243 list_del(&pqp->list); 3244 kfree(pqp); 3245 } 3246 kfree(entry); 3247 } 3248 } 3249 } 3250 kfree(priv->steer); 3251 } 3252 3253 static int extended_func_num(struct pci_dev *pdev) 3254 { 3255 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 3256 } 3257 3258 #define MLX4_OWNER_BASE 0x8069c 3259 #define MLX4_OWNER_SIZE 4 3260 3261 static int mlx4_get_ownership(struct mlx4_dev *dev) 3262 { 3263 void __iomem *owner; 3264 u32 ret; 3265 3266 if (pci_channel_offline(dev->persist->pdev)) 3267 return -EIO; 3268 3269 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3270 MLX4_OWNER_BASE, 3271 MLX4_OWNER_SIZE); 3272 if (!owner) { 3273 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3274 return -ENOMEM; 3275 } 3276 3277 ret = readl(owner); 3278 iounmap(owner); 3279 return (int) !!ret; 3280 } 3281 3282 static void mlx4_free_ownership(struct mlx4_dev *dev) 3283 { 3284 void __iomem *owner; 3285 3286 if (pci_channel_offline(dev->persist->pdev)) 3287 return; 3288 3289 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3290 MLX4_OWNER_BASE, 3291 MLX4_OWNER_SIZE); 3292 if (!owner) { 3293 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3294 return; 3295 } 3296 writel(0, owner); 3297 msleep(1000); 3298 iounmap(owner); 3299 } 3300 3301 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 3302 !!((flags) & MLX4_FLAG_MASTER)) 3303 3304 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 3305 u8 total_vfs, int existing_vfs, int reset_flow) 3306 { 3307 u64 dev_flags = dev->flags; 3308 int err = 0; 3309 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), 3310 MLX4_MAX_NUM_VF); 3311 3312 if (reset_flow) { 3313 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 3314 GFP_KERNEL); 3315 if (!dev->dev_vfs) 3316 goto free_mem; 3317 return dev_flags; 3318 } 3319 3320 atomic_inc(&pf_loading); 3321 if (dev->flags & MLX4_FLAG_SRIOV) { 3322 if (existing_vfs != total_vfs) { 3323 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 3324 existing_vfs, total_vfs); 3325 total_vfs = existing_vfs; 3326 } 3327 } 3328 3329 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL); 3330 if (NULL == dev->dev_vfs) { 3331 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 3332 goto disable_sriov; 3333 } 3334 3335 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 3336 if (total_vfs > fw_enabled_sriov_vfs) { 3337 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). 
Continuing without SR_IOV\n", 3338 total_vfs, fw_enabled_sriov_vfs); 3339 err = -ENOMEM; 3340 goto disable_sriov; 3341 } 3342 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 3343 err = pci_enable_sriov(pdev, total_vfs); 3344 } 3345 if (err) { 3346 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 3347 err); 3348 goto disable_sriov; 3349 } else { 3350 mlx4_warn(dev, "Running in master mode\n"); 3351 dev_flags |= MLX4_FLAG_SRIOV | 3352 MLX4_FLAG_MASTER; 3353 dev_flags &= ~MLX4_FLAG_SLAVE; 3354 dev->persist->num_vfs = total_vfs; 3355 } 3356 return dev_flags; 3357 3358 disable_sriov: 3359 atomic_dec(&pf_loading); 3360 free_mem: 3361 dev->persist->num_vfs = 0; 3362 kfree(dev->dev_vfs); 3363 dev->dev_vfs = NULL; 3364 return dev_flags & ~MLX4_FLAG_MASTER; 3365 } 3366 3367 enum { 3368 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 3369 }; 3370 3371 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 3372 int *nvfs) 3373 { 3374 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 3375 /* Checking for 64 VFs as a limitation of CX2 */ 3376 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 3377 requested_vfs >= 64) { 3378 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 3379 requested_vfs); 3380 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 3381 } 3382 return 0; 3383 } 3384 3385 static int mlx4_pci_enable_device(struct mlx4_dev *dev) 3386 { 3387 struct pci_dev *pdev = dev->persist->pdev; 3388 int err = 0; 3389 3390 mutex_lock(&dev->persist->pci_status_mutex); 3391 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { 3392 err = pci_enable_device(pdev); 3393 if (!err) 3394 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; 3395 } 3396 mutex_unlock(&dev->persist->pci_status_mutex); 3397 3398 return err; 3399 } 3400 3401 static void mlx4_pci_disable_device(struct mlx4_dev *dev) 3402 { 3403 struct pci_dev *pdev = dev->persist->pdev; 3404 3405 mutex_lock(&dev->persist->pci_status_mutex); 3406 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { 3407 pci_disable_device(pdev); 3408 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; 3409 } 3410 mutex_unlock(&dev->persist->pci_status_mutex); 3411 } 3412 3413 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3414 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3415 int reset_flow) 3416 { 3417 struct devlink *devlink = priv_to_devlink(priv); 3418 struct mlx4_dev *dev; 3419 unsigned sum = 0; 3420 int err; 3421 int port; 3422 int i; 3423 struct mlx4_dev_cap *dev_cap = NULL; 3424 int existing_vfs = 0; 3425 3426 devl_assert_locked(devlink); 3427 dev = &priv->dev; 3428 3429 INIT_LIST_HEAD(&priv->ctx_list); 3430 spin_lock_init(&priv->ctx_lock); 3431 3432 ATOMIC_INIT_NOTIFIER_HEAD(&priv->event_nh); 3433 3434 mutex_init(&priv->port_mutex); 3435 mutex_init(&priv->bond_mutex); 3436 3437 INIT_LIST_HEAD(&priv->pgdir_list); 3438 mutex_init(&priv->pgdir_mutex); 3439 spin_lock_init(&priv->cmd.context_lock); 3440 3441 INIT_LIST_HEAD(&priv->bf_list); 3442 mutex_init(&priv->bf_mutex); 3443 3444 dev->rev_id = pdev->revision; 3445 dev->numa_node = dev_to_node(&pdev->dev); 3446 3447 /* Detect if this device is a virtual function */ 3448 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3449 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 3450 dev->flags |= MLX4_FLAG_SLAVE; 3451 } else { 3452 /* We reset the device and enable SRIOV only for physical 3453 * devices. 
Try to claim ownership on the device; 3454 * if already taken, skip -- do not allow multiple PFs */ 3455 err = mlx4_get_ownership(dev); 3456 if (err) { 3457 if (err < 0) 3458 return err; 3459 else { 3460 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 3461 return -EINVAL; 3462 } 3463 } 3464 3465 atomic_set(&priv->opreq_count, 0); 3466 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 3467 3468 /* 3469 * Now reset the HCA before we touch the PCI capabilities or 3470 * attempt a firmware command, since a boot ROM may have left 3471 * the HCA in an undefined state. 3472 */ 3473 err = mlx4_reset(dev); 3474 if (err) { 3475 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 3476 goto err_sriov; 3477 } 3478 3479 if (total_vfs) { 3480 dev->flags = MLX4_FLAG_MASTER; 3481 existing_vfs = pci_num_vf(pdev); 3482 if (existing_vfs) 3483 dev->flags |= MLX4_FLAG_SRIOV; 3484 dev->persist->num_vfs = total_vfs; 3485 } 3486 } 3487 3488 /* on load remove any previous indication of internal error, 3489 * device is up. 3490 */ 3491 dev->persist->state = MLX4_DEVICE_STATE_UP; 3492 3493 slave_start: 3494 err = mlx4_cmd_init(dev); 3495 if (err) { 3496 mlx4_err(dev, "Failed to init command interface, aborting\n"); 3497 goto err_sriov; 3498 } 3499 3500 /* In slave functions, the communication channel must be initialized 3501 * before posting commands. Also, init num_slaves before calling 3502 * mlx4_init_hca */ 3503 if (mlx4_is_mfunc(dev)) { 3504 if (mlx4_is_master(dev)) { 3505 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 3506 3507 } else { 3508 dev->num_slaves = 0; 3509 err = mlx4_multi_func_init(dev); 3510 if (err) { 3511 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 3512 goto err_cmd; 3513 } 3514 } 3515 } 3516 3517 err = mlx4_init_fw(dev); 3518 if (err) { 3519 mlx4_err(dev, "Failed to init fw, aborting.\n"); 3520 goto err_mfunc; 3521 } 3522 3523 if (mlx4_is_master(dev)) { 3524 /* when we hit the goto slave_start below, dev_cap already initialized */ 3525 if (!dev_cap) { 3526 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 3527 3528 if (!dev_cap) { 3529 err = -ENOMEM; 3530 goto err_fw; 3531 } 3532 3533 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3534 if (err) { 3535 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3536 goto err_fw; 3537 } 3538 3539 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3540 goto err_fw; 3541 3542 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3543 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 3544 total_vfs, 3545 existing_vfs, 3546 reset_flow); 3547 3548 mlx4_close_fw(dev); 3549 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3550 dev->flags = dev_flags; 3551 if (!SRIOV_VALID_STATE(dev->flags)) { 3552 mlx4_err(dev, "Invalid SRIOV state\n"); 3553 goto err_sriov; 3554 } 3555 err = mlx4_reset(dev); 3556 if (err) { 3557 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 3558 goto err_sriov; 3559 } 3560 goto slave_start; 3561 } 3562 } else { 3563 /* Legacy mode FW requires SRIOV to be enabled before 3564 * doing QUERY_DEV_CAP, since max_eq's value is different if 3565 * SRIOV is enabled. 
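* (This branch is only re-entered on the second pass through slave_start, after mlx4_enable_sriov() above has turned SR-IOV on, so the capabilities re-read here reflect the SR-IOV-enabled configuration.)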
3566 */ 3567 memset(dev_cap, 0, sizeof(*dev_cap)); 3568 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3569 if (err) { 3570 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3571 goto err_fw; 3572 } 3573 3574 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3575 goto err_fw; 3576 } 3577 } 3578 3579 err = mlx4_init_hca(dev); 3580 if (err) { 3581 if (err == -EACCES) { 3582 /* Not primary Physical function 3583 * Running in slave mode */ 3584 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3585 /* We're not a PF */ 3586 if (dev->flags & MLX4_FLAG_SRIOV) { 3587 if (!existing_vfs) 3588 pci_disable_sriov(pdev); 3589 if (mlx4_is_master(dev) && !reset_flow) 3590 atomic_dec(&pf_loading); 3591 dev->flags &= ~MLX4_FLAG_SRIOV; 3592 } 3593 if (!mlx4_is_slave(dev)) 3594 mlx4_free_ownership(dev); 3595 dev->flags |= MLX4_FLAG_SLAVE; 3596 dev->flags &= ~MLX4_FLAG_MASTER; 3597 goto slave_start; 3598 } else 3599 goto err_fw; 3600 } 3601 3602 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3603 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 3604 existing_vfs, reset_flow); 3605 3606 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 3607 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 3608 dev->flags = dev_flags; 3609 err = mlx4_cmd_init(dev); 3610 if (err) { 3611 /* Only VHCR is cleaned up, so could still 3612 * send FW commands 3613 */ 3614 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 3615 goto err_close; 3616 } 3617 } else { 3618 dev->flags = dev_flags; 3619 } 3620 3621 if (!SRIOV_VALID_STATE(dev->flags)) { 3622 mlx4_err(dev, "Invalid SRIOV state\n"); 3623 err = -EINVAL; 3624 goto err_close; 3625 } 3626 } 3627 3628 /* check if the device is functioning at its maximum possible speed. 3629 * No return code for this call, just warn the user in case of PCI 3630 * express device capabilities are under-satisfied by the bus. 3631 */ 3632 if (!mlx4_is_slave(dev)) 3633 pcie_print_link_status(dev->persist->pdev); 3634 3635 /* In master functions, the communication channel must be initialized 3636 * after obtaining its address from fw */ 3637 if (mlx4_is_master(dev)) { 3638 if (dev->caps.num_ports < 2 && 3639 num_vfs_argc > 1) { 3640 err = -EINVAL; 3641 mlx4_err(dev, 3642 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 3643 dev->caps.num_ports); 3644 goto err_close; 3645 } 3646 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 3647 3648 for (i = 0; 3649 i < sizeof(dev->persist->nvfs)/ 3650 sizeof(dev->persist->nvfs[0]); i++) { 3651 unsigned j; 3652 3653 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 3654 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 3655 dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : 3656 dev->caps.num_ports; 3657 } 3658 } 3659 3660 /* In master functions, the communication channel 3661 * must be initialized after obtaining its address from fw 3662 */ 3663 err = mlx4_multi_func_init(dev); 3664 if (err) { 3665 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 3666 goto err_close; 3667 } 3668 } 3669 3670 err = mlx4_alloc_eq_table(dev); 3671 if (err) 3672 goto err_master_mfunc; 3673 3674 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); 3675 mutex_init(&priv->msix_ctl.pool_lock); 3676 3677 mlx4_enable_msi_x(dev); 3678 if ((mlx4_is_mfunc(dev)) && 3679 !(dev->flags & MLX4_FLAG_MSI_X)) { 3680 err = -EOPNOTSUPP; 3681 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 3682 goto err_free_eq; 3683 } 3684 3685 if (!mlx4_is_slave(dev)) { 3686 err = mlx4_init_steering(dev); 3687 if (err) 3688 goto err_disable_msix; 3689 } 3690 3691 mlx4_init_quotas(dev); 3692 3693 err = mlx4_setup_hca(dev); 3694 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 3695 !mlx4_is_mfunc(dev)) { 3696 dev->flags &= ~MLX4_FLAG_MSI_X; 3697 dev->caps.num_comp_vectors = 1; 3698 pci_disable_msix(pdev); 3699 err = mlx4_setup_hca(dev); 3700 } 3701 3702 if (err) 3703 goto err_steer; 3704 3705 /* When PF resources are ready arm its comm channel to enable 3706 * getting commands 3707 */ 3708 if (mlx4_is_master(dev)) { 3709 err = mlx4_ARM_COMM_CHANNEL(dev); 3710 if (err) { 3711 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3712 err); 3713 goto err_steer; 3714 } 3715 } 3716 3717 for (port = 1; port <= dev->caps.num_ports; port++) { 3718 err = mlx4_init_port_info(dev, port); 3719 if (err) 3720 goto err_port; 3721 } 3722 3723 priv->v2p.port1 = 1; 3724 priv->v2p.port2 = 2; 3725 3726 err = mlx4_register_device(dev); 3727 if (err) 3728 goto err_port; 3729 3730 mlx4_request_modules(dev); 3731 3732 mlx4_sense_init(dev); 3733 mlx4_start_sense(dev); 3734 3735 priv->removed = 0; 3736 3737 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3738 atomic_dec(&pf_loading); 3739 3740 kfree(dev_cap); 3741 return 0; 3742 3743 err_port: 3744 for (--port; port >= 1; --port) 3745 mlx4_cleanup_port_info(&priv->port[port]); 3746 3747 mlx4_cleanup_default_counters(dev); 3748 if (!mlx4_is_slave(dev)) 3749 mlx4_cleanup_counters_table(dev); 3750 mlx4_cleanup_qp_table(dev); 3751 mlx4_cleanup_srq_table(dev); 3752 mlx4_cleanup_cq_table(dev); 3753 mlx4_cmd_use_polling(dev); 3754 mlx4_cleanup_eq_table(dev); 3755 mlx4_cleanup_mcg_table(dev); 3756 mlx4_cleanup_mr_table(dev); 3757 mlx4_cleanup_xrcd_table(dev); 3758 mlx4_cleanup_pd_table(dev); 3759 mlx4_cleanup_uar_table(dev); 3760 3761 err_steer: 3762 if (!mlx4_is_slave(dev)) 3763 mlx4_clear_steering(dev); 3764 3765 err_disable_msix: 3766 if (dev->flags & MLX4_FLAG_MSI_X) 3767 pci_disable_msix(pdev); 3768 3769 err_free_eq: 3770 mlx4_free_eq_table(dev); 3771 3772 err_master_mfunc: 3773 if (mlx4_is_master(dev)) { 3774 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3775 mlx4_multi_func_cleanup(dev); 3776 } 3777 3778 if (mlx4_is_slave(dev)) 3779 mlx4_slave_destroy_special_qp_cap(dev); 3780 3781 err_close: 3782 mlx4_close_hca(dev); 3783 3784 err_fw: 3785 mlx4_close_fw(dev); 3786 3787 err_mfunc: 3788 if (mlx4_is_slave(dev)) 3789 mlx4_multi_func_cleanup(dev); 3790 3791 err_cmd: 3792 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3793 3794 err_sriov: 3795 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3796 pci_disable_sriov(pdev); 3797 dev->flags &= ~MLX4_FLAG_SRIOV; 3798 } 3799 3800 if (mlx4_is_master(dev) && 
err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev)) {
		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
		mlx4_multi_func_cleanup(dev);
	}

	if (mlx4_is_slave(dev))
		mlx4_slave_destroy_special_qp_cap(dev);

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
	}

	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}

static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned int total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = mlx4_pci_enable_device(&priv->dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to the requirement that all VFs and the PF are *guaranteed*
	 * two MACs per port, we must limit the number of VFs to 63 (since
	 * there are 128 MACs).
	 */
	for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc; i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs > MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VFs (%d) than allowed by hardware (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
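
	/* Illustration of the param_map translation above: a single value,
	 * "num_vfs=5", lands in nvfs[2] (five dual-port VFs); "num_vfs=3,2"
	 * fills nvfs[0] and nvfs[1] (3 VFs on port 1, 2 on port 2); and
	 * "num_vfs=3,2,1" additionally requests one dual-port VF in
	 * nvfs[2]. probe_vf is mapped the same way.
	 */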

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as PF, we normally skip VFs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned int vfs_offset = 0;

			for (i = 0; i < ARRAY_SIZE(nvfs) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == ARRAY_SIZE(nvfs)) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_crdump_init(&priv->dev);
	if (err)
		goto err_release_regions;

	err = mlx4_catas_init(&priv->dev);
	if (err)
		goto err_crdump;

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
	if (err)
		goto err_catas;

	return 0;

err_catas:
	mlx4_catas_end(&priv->dev);

err_crdump:
	mlx4_crdump_end(&priv->dev);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	mlx4_pci_disable_device(&priv->dev);
	return err;
}

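/*
 * The helpers below back devlink's "driver reinit" reload. A sketch of
 * the expected userspace flow (hypothetical device address):
 *
 *   devlink dev param set pci/0000:03:00.0 name max_macs \
 *           value 64 cmode driverinit
 *   devlink dev reload pci/0000:03:00.0
 *
 * On reload, mlx4_devlink_param_load_driverinit_values() applies the
 * saved driverinit values before the device is brought back up.
 */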
static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
	union devlink_param_value saved_value;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
					      &saved_value);
	if (!err && mlx4_internal_err_reset != saved_value.vbool) {
		mlx4_internal_err_reset = saved_value.vbool;
		/* Notify that the value changed in runtime configuration
		 * mode.
		 */
		devl_param_value_changed(devlink,
					 DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
	}
	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
					      &saved_value);
	if (!err)
		log_num_mac = order_base_2(saved_value.vu32);
	err = devl_param_driverinit_value_get(devlink,
					      MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
					      &saved_value);
	if (!err)
		enable_64b_cqe_eqe = saved_value.vbool;
	err = devl_param_driverinit_value_get(devlink,
					      MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
					      &saved_value);
	if (!err)
		enable_4k_uar = saved_value.vbool;
	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
					      &saved_value);
	if (!err && crdump->snapshot_enable != saved_value.vbool) {
		crdump->snapshot_enable = saved_value.vbool;
		devl_param_value_changed(devlink,
					 DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
	}
}

static void mlx4_restart_one_down(struct pci_dev *pdev);
static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
			       struct devlink *devlink);

static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
				    enum devlink_reload_action action,
				    enum devlink_reload_limit limit,
				    struct netlink_ext_ack *extack)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_dev_persistent *persist = dev->persist;

	if (netns_change) {
		NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
		return -EOPNOTSUPP;
	}
	if (persist->num_vfs)
		mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
	mlx4_restart_one_down(persist->pdev);
	return 0;
}

static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
				  enum devlink_reload_limit limit, u32 *actions_performed,
				  struct netlink_ext_ack *extack)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_dev_persistent *persist = dev->persist;
	int err;

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
	err = mlx4_restart_one_up(persist->pdev, true, devlink);
	if (err)
		mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
			 err);

	return err;
}

static const struct devlink_ops mlx4_devlink_ops = {
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
	.reload_down	= mlx4_devlink_reload_down,
	.reload_up	= mlx4_devlink_reload_up,
};

static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct devlink *devlink;
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
	if (!devlink)
		return -ENOMEM;
	devl_lock(devlink);
	priv = devlink_priv(devlink);

	dev = &priv->dev;
	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
	if (!dev->persist) {
		ret = -ENOMEM;
		goto err_devlink_free;
	}
	dev->persist->pdev = pdev;
	dev->persist->dev = dev;
	pci_set_drvdata(pdev, dev->persist);
	priv->pci_dev_data = id->driver_data;
	mutex_init(&dev->persist->device_state_mutex);
	mutex_init(&dev->persist->interface_state_mutex);
	mutex_init(&dev->persist->pci_status_mutex);

	ret = devl_params_register(devlink, mlx4_devlink_params,
				   ARRAY_SIZE(mlx4_devlink_params));
	if (ret)
		goto err_devlink_unregister;
	mlx4_devlink_set_params_init_values(devlink);
	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		goto err_params_unregister;

	pci_save_state(pdev);
	devl_unlock(devlink);
	devlink_register(devlink);
	return 0;

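	/* Unwind in reverse order of the setup above: devlink params,
	 * then the persistent state, then the devlink instance itself.
	 */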
err_params_unregister:
	devl_params_unregister(devlink, mlx4_devlink_params,
			       ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
	kfree(dev->persist);
err_devlink_free:
	devl_unlock(devlink);
	devlink_free(devlink);
	return ret;
}

static void mlx4_clean_dev(struct mlx4_dev *dev)
{
	struct mlx4_dev_persistent *persist = dev->persist;
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);

	memset(priv, 0, sizeof(*priv));
	priv->dev.persist = persist;
	priv->dev.flags = flags;
}

static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	struct devlink *devlink;
	int p, i;

	devlink = priv_to_devlink(priv);
	devl_assert_locked(devlink);
	if (priv->removed)
		return;

	/* Save the current port types for later use. */
	for (i = 0; i < dev->caps.num_ports; i++) {
		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] =
			dev->caps.possible_type[i + 1];
	}

	pci_dev_data = priv->pci_dev_data;

	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	mlx4_slave_destroy_special_qp_cap(dev);
	kfree(dev->dev_vfs);

	mlx4_clean_dev(dev);
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

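/*
 * mlx4_unload_one() tears the device down but keeps the persistent
 * state and the devlink instance so the device can be brought back up
 * (see mlx4_restart_one() below); mlx4_remove_one() is the full
 * teardown used at PCI remove time.
 */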
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct devlink *devlink = priv_to_devlink(priv);
	int active_vfs = 0;

	devlink_unregister(devlink);

	devl_lock(devlink);
	if (mlx4_is_slave(dev))
		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;

	mutex_lock(&persist->interface_state_mutex);
	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
	mutex_unlock(&persist->interface_state_mutex);

	/* Disabling SR-IOV is not allowed while there are active VFs. */
	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF while there are active VFs!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}

	/* The device is now marked for deletion; proceed without the lock
	 * so that other tasks can terminate.
	 */
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	else
		mlx4_info(dev, "%s: interface is down\n", __func__);
	mlx4_catas_end(dev);
	mlx4_crdump_end(dev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
	}

	pci_release_regions(pdev);
	mlx4_pci_disable_device(dev);
	devl_params_unregister(devlink, mlx4_devlink_params,
			       ARRAY_SIZE(mlx4_devlink_params));
	kfree(dev->persist);
	devl_unlock(devlink);
	devlink_free(devlink);
}

static int restore_current_port_types(struct mlx4_dev *dev,
				      enum mlx4_port_type *types,
				      enum mlx4_port_type *poss_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err, i;

	mlx4_stop_sense(dev);

	mutex_lock(&priv->port_mutex);
	for (i = 0; i < dev->caps.num_ports; i++)
		dev->caps.possible_type[i + 1] = poss_types[i];
	err = mlx4_change_port_types(dev, types);
	mlx4_start_sense(dev);
	mutex_unlock(&priv->port_mutex);

	return err;
}

static void mlx4_restart_one_down(struct pci_dev *pdev)
{
	mlx4_unload_one(pdev);
}

static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
			       struct devlink *devlink)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	if (reload)
		mlx4_devlink_param_load_driverinit_values(devlink);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	err = restore_current_port_types(dev, dev->persist->curr_port_type,
					 dev->persist->curr_port_poss_type);
	if (err)
		mlx4_err(dev, "could not restore original port types (%d)\n",
			 err);

	return err;
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_restart_one_down(pdev);
	return mlx4_restart_one_up(pdev, false, NULL);
}

#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }

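/* MLX_SP entries force port sensing, MLX_VF marks virtual functions,
 * and MLX_GN covers the remaining (generic) devices.
 */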
static const struct pci_device_id mlx4_pci_table[] = {
#ifdef CONFIG_MLX4_CORE_GEN2
	/* MT25408 "Hermon" */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR),	/* SDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR),	/* DDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR),	/* QDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN),	/* EN 10GigE */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2),	/* EN 10GigE Gen2 */
	/* MT25458 ConnectX EN 10GBASE-T */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2),	/* Gen2 */
	/* MT26468 ConnectX EN 10GigE PCIe Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
	/* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
	/* MT26478 ConnectX2 40GigE PCIe Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
	/* MT25400 Family [ConnectX-2] */
	MLX_VF(0x1002),					/* Virtual Function */
#endif /* CONFIG_MLX4_CORE_GEN2 */
	/* MT27500 Family [ConnectX-3] */
	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
	MLX_VF(0x1004),					/* Virtual Function */
	MLX_GN(0x1005),					/* MT27510 Family */
	MLX_GN(0x1006),					/* MT27511 Family */
	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO),	/* MT27520 Family */
	MLX_GN(0x1008),					/* MT27521 Family */
	MLX_GN(0x1009),					/* MT27530 Family */
	MLX_GN(0x100a),					/* MT27531 Family */
	MLX_GN(0x100b),					/* MT27540 Family */
	MLX_GN(0x100c),					/* MT27541 Family */
	MLX_GN(0x100d),					/* MT27550 Family */
	MLX_GN(0x100e),					/* MT27551 Family */
	MLX_GN(0x100f),					/* MT27560 Family */
	MLX_GN(0x1010),					/* MT27561 Family */

	/*
	 * See the mellanox_check_broken_intx_masking() quirk when
	 * adding devices.
	 */

	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct devlink *devlink;

	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
	mlx4_enter_error_state(persist);

	devlink = priv_to_devlink(mlx4_priv(dev));
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);

	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	mlx4_pci_disable_device(persist->dev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	int err;

	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
	err = mlx4_pci_enable_device(dev);
	if (err) {
		mlx4_err(dev, "Cannot re-enable device, err=%d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

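/*
 * AER recovery sequence: .error_detected unloads the device and asks
 * for a slot reset, .slot_reset re-enables PCI access, and .resume
 * (below) reloads the driver state, mirroring a restart.
 */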
static void mlx4_pci_resume(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	struct devlink *devlink;
	int total_vfs;
	int err;

	mlx4_err(dev, "%s was called\n", __func__);
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	devlink = priv_to_devlink(priv);
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
		if (err) {
			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
				 __func__, err);
			goto end;
		}

		err = restore_current_port_types(dev,
						 dev->persist->curr_port_type,
						 dev->persist->curr_port_poss_type);
		if (err)
			mlx4_err(dev, "could not restore original port types (%d)\n", err);
	}
end:
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);
}

static void mlx4_shutdown(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct devlink *devlink;

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	devlink = priv_to_devlink(mlx4_priv(dev));
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);
	mlx4_pci_disable_device(dev);
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected	= mlx4_pci_err_detected,
	.slot_reset	= mlx4_pci_slot_reset,
	.resume		= mlx4_pci_resume,
};

static int __maybe_unused mlx4_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct devlink *devlink;

	mlx4_err(dev, "suspend was called\n");
	devlink = priv_to_devlink(mlx4_priv(dev));
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);

	return 0;
}

static int __maybe_unused mlx4_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	struct devlink *devlink;
	int total_vfs;
	int ret = 0;

	mlx4_err(dev, "resume was called\n");
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	devlink = priv_to_devlink(priv);
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
				    nvfs, priv, 1);
		if (!ret) {
			ret = restore_current_port_types(dev,
					dev->persist->curr_port_type,
					dev->persist->curr_port_poss_type);
			if (ret)
				mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);
		}
	}
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);

	return ret;
}

static SIMPLE_DEV_PM_OPS(mlx4_pm_ops, mlx4_suspend, mlx4_resume);

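/* Suspend/resume reuse the unload/load path under the devlink lock,
 * the same way AER resume and devlink reload do.
 */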
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_shutdown,
	.remove		= mlx4_remove_one,
	.driver.pm	= &mlx4_pm_ops,
	.err_handler	= &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if (msi_x < 0) {
		pr_warn("mlx4_core: bad msi_x: %d\n", msi_x);
		return -1;
	}

	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check that the module parameter for port types is a legal
	 * combination.
	 */
	if (!port_type_array[0] && port_type_array[1]) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);
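
/*
 * Illustrative load sketch (not from the original source): the checks
 * in mlx4_verify_params() run before the PCI driver registers, so e.g.
 *   modprobe mlx4_core log_num_mac=7 log_mtts_per_seg=3
 * loads, while log_mtts_per_seg=8 makes mlx4_init() return -EINVAL and
 * the module fails to load.
 */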