/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 #include <linux/kmod.h> 45 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/doorbell.h> 48 49 #include "mlx4.h" 50 #include "fw.h" 51 #include "icm.h" 52 53 MODULE_AUTHOR("Roland Dreier"); 54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 55 MODULE_LICENSE("Dual BSD/GPL"); 56 MODULE_VERSION(DRV_VERSION); 57 58 struct workqueue_struct *mlx4_wq; 59 60 #ifdef CONFIG_MLX4_DEBUG 61 62 int mlx4_debug_level = 0; 63 module_param_named(debug_level, mlx4_debug_level, int, 0644); 64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 65 66 #endif /* CONFIG_MLX4_DEBUG */ 67 68 #ifdef CONFIG_PCI_MSI 69 70 static int msi_x = 1; 71 module_param(msi_x, int, 0444); 72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 73 74 #else /* CONFIG_PCI_MSI */ 75 76 #define msi_x (0) 77 78 #endif /* CONFIG_PCI_MSI */ 79 80 static uint8_t num_vfs[3] = {0, 0, 0}; 81 static int num_vfs_argc; 82 module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 84 "num_vfs=port1,port2,port1+2"); 85 86 static uint8_t probe_vf[3] = {0, 0, 0}; 87 static int probe_vfs_argc; 88 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 89 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 90 "probe_vf=port1,port2,port1+2"); 91 92 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 93 module_param_named(log_num_mgm_entry_size, 94 mlx4_log_num_mgm_entry_size, int, 0444); 95 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 96 " of qp per mcg, for example:" 97 " 10 gives 248.range: 7 <=" 98 " log_num_mgm_entry_size <= 12." 
99 " To activate device managed" 100 " flow steering when available, set to -1"); 101 102 static bool enable_64b_cqe_eqe = true; 103 module_param(enable_64b_cqe_eqe, bool, 0444); 104 MODULE_PARM_DESC(enable_64b_cqe_eqe, 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 106 107 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 109 MLX4_FUNC_CAP_DMFS_A0_STATIC) 110 111 static char mlx4_version[] = 112 DRV_NAME ": Mellanox ConnectX core driver v" 113 DRV_VERSION " (" DRV_RELDATE ")\n"; 114 115 static struct mlx4_profile default_profile = { 116 .num_qp = 1 << 18, 117 .num_srq = 1 << 16, 118 .rdmarc_per_qp = 1 << 4, 119 .num_cq = 1 << 16, 120 .num_mcg = 1 << 13, 121 .num_mpt = 1 << 19, 122 .num_mtt = 1 << 20, /* It is really num mtt segements */ 123 }; 124 125 static struct mlx4_profile low_mem_profile = { 126 .num_qp = 1 << 17, 127 .num_srq = 1 << 6, 128 .rdmarc_per_qp = 1 << 4, 129 .num_cq = 1 << 8, 130 .num_mcg = 1 << 8, 131 .num_mpt = 1 << 9, 132 .num_mtt = 1 << 7, 133 }; 134 135 static int log_num_mac = 7; 136 module_param_named(log_num_mac, log_num_mac, int, 0444); 137 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 138 139 static int log_num_vlan; 140 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 141 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 142 /* Log2 max number of VLANs per ETH port (0-7) */ 143 #define MLX4_LOG_NUM_VLANS 7 144 #define MLX4_MIN_LOG_NUM_VLANS 0 145 #define MLX4_MIN_LOG_NUM_MAC 1 146 147 static bool use_prio; 148 module_param_named(use_prio, use_prio, bool, 0444); 149 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); 150 151 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 152 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 153 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 154 155 static int 
port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 156 static int arr_argc = 2; 157 module_param_array(port_type_array, int, &arr_argc, 0444); 158 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 159 "1 for IB, 2 for Ethernet"); 160 161 struct mlx4_port_config { 162 struct list_head list; 163 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 164 struct pci_dev *pdev; 165 }; 166 167 static atomic_t pf_loading = ATOMIC_INIT(0); 168 169 int mlx4_check_port_params(struct mlx4_dev *dev, 170 enum mlx4_port_type *port_type) 171 { 172 int i; 173 174 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 175 for (i = 0; i < dev->caps.num_ports - 1; i++) { 176 if (port_type[i] != port_type[i + 1]) { 177 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 178 return -EINVAL; 179 } 180 } 181 } 182 183 for (i = 0; i < dev->caps.num_ports; i++) { 184 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 185 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 186 i + 1); 187 return -EINVAL; 188 } 189 } 190 return 0; 191 } 192 193 static void mlx4_set_port_mask(struct mlx4_dev *dev) 194 { 195 int i; 196 197 for (i = 1; i <= dev->caps.num_ports; ++i) 198 dev->caps.port_mask[i] = dev->caps.port_type[i]; 199 } 200 201 enum { 202 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 203 }; 204 205 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 206 { 207 int err = 0; 208 struct mlx4_func func; 209 210 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 211 err = mlx4_QUERY_FUNC(dev, &func, 0); 212 if (err) { 213 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 214 return err; 215 } 216 dev_cap->max_eqs = func.max_eq; 217 dev_cap->reserved_eqs = func.rsvd_eqs; 218 dev_cap->reserved_uars = func.rsvd_uars; 219 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 220 } 221 return err; 222 } 223 224 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 225 { 226 struct mlx4_caps 
*dev_cap = &dev->caps; 227 228 /* FW not supporting or cancelled by user */ 229 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || 230 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) 231 return; 232 233 /* Must have 64B CQE_EQE enabled by FW to use bigger stride 234 * When FW has NCSI it may decide not to report 64B CQE/EQEs 235 */ 236 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || 237 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { 238 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 239 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 240 return; 241 } 242 243 if (cache_line_size() == 128 || cache_line_size() == 256) { 244 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); 245 /* Changing the real data inside CQE size to 32B */ 246 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 247 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 248 249 if (mlx4_is_master(dev)) 250 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 251 } else { 252 mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); 253 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 254 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 255 } 256 } 257 258 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, 259 struct mlx4_port_cap *port_cap) 260 { 261 dev->caps.vl_cap[port] = port_cap->max_vl; 262 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; 263 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; 264 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; 265 /* set gid and pkey table operating lengths by default 266 * to non-sriov values 267 */ 268 dev->caps.gid_table_len[port] = port_cap->max_gids; 269 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 270 dev->caps.port_width_cap[port] = port_cap->max_port_width; 271 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 272 dev->caps.def_mac[port] = port_cap->def_mac; 273 dev->caps.supported_type[port] = port_cap->supported_port_types; 274 dev->caps.suggested_type[port] = 
port_cap->suggested_type; 275 dev->caps.default_sense[port] = port_cap->default_sense; 276 dev->caps.trans_type[port] = port_cap->trans_type; 277 dev->caps.vendor_oui[port] = port_cap->vendor_oui; 278 dev->caps.wavelength[port] = port_cap->wavelength; 279 dev->caps.trans_code[port] = port_cap->trans_code; 280 281 return 0; 282 } 283 284 static int mlx4_dev_port(struct mlx4_dev *dev, int port, 285 struct mlx4_port_cap *port_cap) 286 { 287 int err = 0; 288 289 err = mlx4_QUERY_PORT(dev, port, port_cap); 290 291 if (err) 292 mlx4_err(dev, "QUERY_PORT command failed.\n"); 293 294 return err; 295 } 296 297 #define MLX4_A0_STEERING_TABLE_SIZE 256 298 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 299 { 300 int err; 301 int i; 302 303 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 304 if (err) { 305 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 306 return err; 307 } 308 mlx4_dev_cap_dump(dev, dev_cap); 309 310 if (dev_cap->min_page_sz > PAGE_SIZE) { 311 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 312 dev_cap->min_page_sz, PAGE_SIZE); 313 return -ENODEV; 314 } 315 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 316 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 317 dev_cap->num_ports, MLX4_MAX_PORTS); 318 return -ENODEV; 319 } 320 321 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 322 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 323 dev_cap->uar_size, 324 (unsigned long long) pci_resource_len(dev->pdev, 2)); 325 return -ENODEV; 326 } 327 328 dev->caps.num_ports = dev_cap->num_ports; 329 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 330 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 
331 dev->caps.num_sys_eqs : 332 MLX4_MAX_EQ_NUM; 333 for (i = 1; i <= dev->caps.num_ports; ++i) { 334 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); 335 if (err) { 336 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); 337 return err; 338 } 339 } 340 341 dev->caps.uar_page_size = PAGE_SIZE; 342 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 343 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 344 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 345 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 346 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 347 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 348 dev->caps.max_wqes = dev_cap->max_qp_sz; 349 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 350 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 351 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 352 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 353 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 354 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 355 /* 356 * Subtract 1 from the limit because we need to allocate a 357 * spare CQE so the HCA HW can tell the difference between an 358 * empty CQ and a full CQ. 359 */ 360 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 361 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 362 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 363 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 364 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 365 366 /* The first 128 UARs are used for EQ doorbells */ 367 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 368 dev->caps.reserved_pds = dev_cap->reserved_pds; 369 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 370 dev_cap->reserved_xrcds : 0; 371 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 
372 dev_cap->max_xrcds : 0; 373 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 374 375 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 376 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 377 dev->caps.flags = dev_cap->flags; 378 dev->caps.flags2 = dev_cap->flags2; 379 dev->caps.bmme_flags = dev_cap->bmme_flags; 380 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 381 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 382 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 383 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 384 385 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 386 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 387 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 388 /* Don't do sense port on multifunction devices (for now at least) */ 389 if (mlx4_is_mfunc(dev)) 390 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 391 392 if (mlx4_low_memory_profile()) { 393 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 394 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 395 } else { 396 dev->caps.log_num_macs = log_num_mac; 397 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 398 } 399 400 for (i = 1; i <= dev->caps.num_ports; ++i) { 401 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 402 if (dev->caps.supported_type[i]) { 403 /* if only ETH is supported - assign ETH */ 404 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 405 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 406 /* if only IB is supported, assign IB */ 407 else if (dev->caps.supported_type[i] == 408 MLX4_PORT_TYPE_IB) 409 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 410 else { 411 /* if IB and ETH are supported, we set the port 412 * type according to user selection of port type; 413 * if user selected none, take the FW hint */ 414 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 415 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 
416 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 417 else 418 dev->caps.port_type[i] = port_type_array[i - 1]; 419 } 420 } 421 /* 422 * Link sensing is allowed on the port if 3 conditions are true: 423 * 1. Both protocols are supported on the port. 424 * 2. Different types are supported on the port 425 * 3. FW declared that it supports link sensing 426 */ 427 mlx4_priv(dev)->sense.sense_allowed[i] = 428 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 429 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 430 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 431 432 /* 433 * If "default_sense" bit is set, we move the port to "AUTO" mode 434 * and perform sense_port FW command to try and set the correct 435 * port type from beginning 436 */ 437 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 438 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 439 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 440 mlx4_SENSE_PORT(dev, i, &sensed_port); 441 if (sensed_port != MLX4_PORT_TYPE_NONE) 442 dev->caps.port_type[i] = sensed_port; 443 } else { 444 dev->caps.possible_type[i] = dev->caps.port_type[i]; 445 } 446 447 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 448 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 449 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 450 i, 1 << dev->caps.log_num_macs); 451 } 452 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 453 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 454 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 455 i, 1 << dev->caps.log_num_vlans); 456 } 457 } 458 459 dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); 460 461 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 462 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 463 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 464 (1 << dev->caps.log_num_macs) * 465 
(1 << dev->caps.log_num_vlans) * 466 dev->caps.num_ports; 467 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 468 469 if (dev_cap->dmfs_high_rate_qpn_base > 0 && 470 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) 471 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; 472 else 473 dev->caps.dmfs_high_rate_qpn_base = 474 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 475 476 if (dev_cap->dmfs_high_rate_qpn_range > 0 && 477 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 478 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; 479 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; 480 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; 481 } else { 482 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; 483 dev->caps.dmfs_high_rate_qpn_base = 484 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 485 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; 486 } 487 488 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = 489 dev->caps.dmfs_high_rate_qpn_range; 490 491 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 492 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 493 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 494 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 495 496 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? 
MLX4_MAX_NUM_SLAVES : 0; 497 498 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { 499 if (dev_cap->flags & 500 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 501 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 502 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 503 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 504 } 505 506 if (dev_cap->flags2 & 507 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | 508 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { 509 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); 510 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 511 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 512 } 513 } 514 515 if ((dev->caps.flags & 516 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 517 mlx4_is_master(dev)) 518 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 519 520 if (!mlx4_is_slave(dev)) { 521 mlx4_enable_cqe_eqe_stride(dev); 522 dev->caps.alloc_res_qp_mask = 523 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 524 MLX4_RESERVE_A0_QP; 525 } else { 526 dev->caps.alloc_res_qp_mask = 0; 527 } 528 529 return 0; 530 } 531 532 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, 533 enum pci_bus_speed *speed, 534 enum pcie_link_width *width) 535 { 536 u32 lnkcap1, lnkcap2; 537 int err1, err2; 538 539 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 540 541 *speed = PCI_SPEED_UNKNOWN; 542 *width = PCIE_LNK_WIDTH_UNKNOWN; 543 544 err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1); 545 err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2); 546 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 547 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 548 *speed = PCIE_SPEED_8_0GT; 549 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 550 *speed = PCIE_SPEED_5_0GT; 551 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 552 *speed = PCIE_SPEED_2_5GT; 553 } 554 if (!err1) { 555 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 556 if (!lnkcap2) { 
/* pre-r3.0 */ 557 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 558 *speed = PCIE_SPEED_5_0GT; 559 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 560 *speed = PCIE_SPEED_2_5GT; 561 } 562 } 563 564 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { 565 return err1 ? err1 : 566 err2 ? err2 : -EINVAL; 567 } 568 return 0; 569 } 570 571 static void mlx4_check_pcie_caps(struct mlx4_dev *dev) 572 { 573 enum pcie_link_width width, width_cap; 574 enum pci_bus_speed speed, speed_cap; 575 int err; 576 577 #define PCIE_SPEED_STR(speed) \ 578 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 579 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 580 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ 581 "Unknown") 582 583 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 584 if (err) { 585 mlx4_warn(dev, 586 "Unable to determine PCIe device BW capabilities\n"); 587 return; 588 } 589 590 err = pcie_get_minimum_link(dev->pdev, &speed, &width); 591 if (err || speed == PCI_SPEED_UNKNOWN || 592 width == PCIE_LNK_WIDTH_UNKNOWN) { 593 mlx4_warn(dev, 594 "Unable to determine PCI device chain minimum BW\n"); 595 return; 596 } 597 598 if (width != width_cap || speed != speed_cap) 599 mlx4_warn(dev, 600 "PCIe BW is different than device's capability\n"); 601 602 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 603 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 604 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 605 width, width_cap); 606 return; 607 } 608 609 /*The function checks if there are live vf, return the num of them*/ 610 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 611 { 612 struct mlx4_priv *priv = mlx4_priv(dev); 613 struct mlx4_slave_state *s_state; 614 int i; 615 int ret = 0; 616 617 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 618 s_state = &priv->mfunc.master.slave_state[i]; 619 if (s_state->active && s_state->last_cmd != 620 MLX4_COMM_CMD_RESET) { 621 mlx4_warn(dev, "%s: slave: %d is still active\n", 622 __func__, 
i); 623 ret++; 624 } 625 } 626 return ret; 627 } 628 629 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 630 { 631 u32 qk = MLX4_RESERVED_QKEY_BASE; 632 633 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 634 qpn < dev->phys_caps.base_proxy_sqpn) 635 return -EINVAL; 636 637 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 638 /* tunnel qp */ 639 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 640 else 641 qk += qpn - dev->phys_caps.base_proxy_sqpn; 642 *qkey = qk; 643 return 0; 644 } 645 EXPORT_SYMBOL(mlx4_get_parav_qkey); 646 647 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 648 { 649 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 650 651 if (!mlx4_is_master(dev)) 652 return; 653 654 priv->virt2phys_pkey[slave][port - 1][i] = val; 655 } 656 EXPORT_SYMBOL(mlx4_sync_pkey_table); 657 658 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 659 { 660 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 661 662 if (!mlx4_is_master(dev)) 663 return; 664 665 priv->slave_node_guids[slave] = guid; 666 } 667 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 668 669 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 670 { 671 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 672 673 if (!mlx4_is_master(dev)) 674 return 0; 675 676 return priv->slave_node_guids[slave]; 677 } 678 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 679 680 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 681 { 682 struct mlx4_priv *priv = mlx4_priv(dev); 683 struct mlx4_slave_state *s_slave; 684 685 if (!mlx4_is_master(dev)) 686 return 0; 687 688 s_slave = &priv->mfunc.master.slave_state[slave]; 689 return !!s_slave->active; 690 } 691 EXPORT_SYMBOL(mlx4_is_slave_active); 692 693 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 694 struct mlx4_dev_cap *dev_cap, 695 struct mlx4_init_hca_param *hca_param) 696 { 697 dev->caps.steering_mode = 
hca_param->steering_mode; 698 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 699 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 700 dev->caps.fs_log_max_ucast_qp_range_size = 701 dev_cap->fs_log_max_ucast_qp_range_size; 702 } else 703 dev->caps.num_qp_per_mgm = 704 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 705 706 mlx4_dbg(dev, "Steering mode is: %s\n", 707 mlx4_steering_mode_str(dev->caps.steering_mode)); 708 } 709 710 static int mlx4_slave_cap(struct mlx4_dev *dev) 711 { 712 int err; 713 u32 page_size; 714 struct mlx4_dev_cap dev_cap; 715 struct mlx4_func_cap func_cap; 716 struct mlx4_init_hca_param hca_param; 717 u8 i; 718 719 memset(&hca_param, 0, sizeof(hca_param)); 720 err = mlx4_QUERY_HCA(dev, &hca_param); 721 if (err) { 722 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); 723 return err; 724 } 725 726 /* fail if the hca has an unknown global capability 727 * at this time global_caps should be always zeroed 728 */ 729 if (hca_param.global_caps) { 730 mlx4_err(dev, "Unknown hca global capabilities\n"); 731 return -ENOSYS; 732 } 733 734 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 735 736 dev->caps.hca_core_clock = hca_param.hca_core_clock; 737 738 memset(&dev_cap, 0, sizeof(dev_cap)); 739 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 740 err = mlx4_dev_cap(dev, &dev_cap); 741 if (err) { 742 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 743 return err; 744 } 745 746 err = mlx4_QUERY_FW(dev); 747 if (err) 748 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); 749 750 page_size = ~dev->caps.page_size_cap + 1; 751 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 752 if (page_size > PAGE_SIZE) { 753 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 754 page_size, PAGE_SIZE); 755 return -ENODEV; 756 } 757 758 /* slave gets uar page size from QUERY_HCA fw command */ 759 dev->caps.uar_page_size = 1 << 
(hca_param.uar_page_sz + 12); 760 761 /* TODO: relax this assumption */ 762 if (dev->caps.uar_page_size != PAGE_SIZE) { 763 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 764 dev->caps.uar_page_size, PAGE_SIZE); 765 return -ENODEV; 766 } 767 768 memset(&func_cap, 0, sizeof(func_cap)); 769 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 770 if (err) { 771 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", 772 err); 773 return err; 774 } 775 776 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 777 PF_CONTEXT_BEHAVIOUR_MASK) { 778 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", 779 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); 780 return -ENOSYS; 781 } 782 783 dev->caps.num_ports = func_cap.num_ports; 784 dev->quotas.qp = func_cap.qp_quota; 785 dev->quotas.srq = func_cap.srq_quota; 786 dev->quotas.cq = func_cap.cq_quota; 787 dev->quotas.mpt = func_cap.mpt_quota; 788 dev->quotas.mtt = func_cap.mtt_quota; 789 dev->caps.num_qps = 1 << hca_param.log_num_qps; 790 dev->caps.num_srqs = 1 << hca_param.log_num_srqs; 791 dev->caps.num_cqs = 1 << hca_param.log_num_cqs; 792 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 793 dev->caps.num_eqs = func_cap.max_eq; 794 dev->caps.reserved_eqs = func_cap.reserved_eq; 795 dev->caps.num_pds = MLX4_NUM_PDS; 796 dev->caps.num_mgms = 0; 797 dev->caps.num_amgms = 0; 798 799 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 800 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 801 dev->caps.num_ports, MLX4_MAX_PORTS); 802 return -ENODEV; 803 } 804 805 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); 806 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 807 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 808 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 809 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 
810 811 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 812 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || 813 !dev->caps.qp0_qkey) { 814 err = -ENOMEM; 815 goto err_mem; 816 } 817 818 for (i = 1; i <= dev->caps.num_ports; ++i) { 819 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 820 if (err) { 821 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 822 i, err); 823 goto err_mem; 824 } 825 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 826 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 827 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 828 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 829 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 830 dev->caps.port_mask[i] = dev->caps.port_type[i]; 831 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 832 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 833 &dev->caps.gid_table_len[i], 834 &dev->caps.pkey_table_len[i])) 835 goto err_mem; 836 } 837 838 if (dev->caps.uar_page_size * (dev->caps.num_uars - 839 dev->caps.reserved_uars) > 840 pci_resource_len(dev->pdev, 2)) { 841 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 842 dev->caps.uar_page_size * dev->caps.num_uars, 843 (unsigned long long) pci_resource_len(dev->pdev, 2)); 844 goto err_mem; 845 } 846 847 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 848 dev->caps.eqe_size = 64; 849 dev->caps.eqe_factor = 1; 850 } else { 851 dev->caps.eqe_size = 32; 852 dev->caps.eqe_factor = 0; 853 } 854 855 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 856 dev->caps.cqe_size = 64; 857 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 858 } else { 859 dev->caps.cqe_size = 32; 860 } 861 862 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 863 dev->caps.eqe_size = hca_param.eqe_size; 864 dev->caps.eqe_factor = 0; 865 } 866 867 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 868 
dev->caps.cqe_size = hca_param.cqe_size; 869 /* User still need to know when CQE > 32B */ 870 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 871 } 872 873 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 874 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 875 876 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 877 878 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 879 dev->caps.bf_reg_size) 880 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 881 882 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 883 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 884 885 return 0; 886 887 err_mem: 888 kfree(dev->caps.qp0_qkey); 889 kfree(dev->caps.qp0_tunnel); 890 kfree(dev->caps.qp0_proxy); 891 kfree(dev->caps.qp1_tunnel); 892 kfree(dev->caps.qp1_proxy); 893 dev->caps.qp0_qkey = NULL; 894 dev->caps.qp0_tunnel = NULL; 895 dev->caps.qp0_proxy = NULL; 896 dev->caps.qp1_tunnel = NULL; 897 dev->caps.qp1_proxy = NULL; 898 899 return err; 900 } 901 902 static void mlx4_request_modules(struct mlx4_dev *dev) 903 { 904 int port; 905 int has_ib_port = false; 906 int has_eth_port = false; 907 #define EN_DRV_NAME "mlx4_en" 908 #define IB_DRV_NAME "mlx4_ib" 909 910 for (port = 1; port <= dev->caps.num_ports; port++) { 911 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 912 has_ib_port = true; 913 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 914 has_eth_port = true; 915 } 916 917 if (has_eth_port) 918 request_module_nowait(EN_DRV_NAME); 919 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 920 request_module_nowait(IB_DRV_NAME); 921 } 922 923 /* 924 * Change the port configuration of the device. 925 * Every user of this function must hold the port mutex. 
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	/* port_types[] is 0-based while dev->caps.port_type[] is 1-based. */
	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		/* Tear down the whole device, reprogram every port, then
		 * re-register — port type cannot be changed live.
		 */
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

/* sysfs show: current port type, with "auto (...)" when auto-sensing. */
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

/* sysfs store: accepts "ib", "eth" or "auto" and reconfigures the port.
 * Serializes concurrent writers with a local static mutex, stops port
 * sensing, and validates the combination before applying it.
 */
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	static DEFINE_MUTEX(set_port_type_mutex);
	int i;
	int err = 0;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	/* Build the requested configuration; AUTO resolves to whatever the
	 * port is currently running as.
	 */
	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	/* "auto" needs either DPDP or sense support; otherwise reject and
	 * roll possible_type back to the current port type.
	 */
	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}

/* IBTA-encoded MTU values (IB spec encoding, not bytes). */
enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

/* Byte MTU -> IBTA encoding; -1 for unsupported values. */
static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

/* IBTA encoding -> byte MTU; -1 for invalid encodings. */
static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

/* sysfs show: port-level IB MTU in bytes (IB ports only). */
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

/* sysfs store: set the IB MTU; requires closing and re-opening every port
 * and re-registering the device for the new MTU to take effect.
 */
static ssize_t
set_port_ib_mtu(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

/* Allocate the firmware area ICM, map it (MAP_FA) and start the FW.
 * On failure, unwinds in reverse order via the goto chain.
 */
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

/* Map the cMPT (context MPT) ICM sub-tables for QPs, SRQs, CQs and EQs.
 * Each table lives at cmpt_base plus a per-type offset selected by
 * MLX4_CMPT_SHIFT; unwind order mirrors init order.
 */
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

	/* Unwind: tear down only what was successfully initialized,
	 * in reverse order.
	 */
err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

/* Size, allocate and map all ICM (InfiniHost Context Memory) tables the
 * FW needs: aux area, cMPTs, EQ/MTT/dMPT/QP/AUXC/ALTC/RDMARC/CQ/SRQ/MCG
 * contexts.  icm_size comes from mlx4_make_profile(); every mapping has a
 * matching cleanup in the reverse-order goto chain at the bottom.
 */
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	/* Ask FW how many aux pages the requested ICM size needs. */
	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}


	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

	/* Reverse-order unwind of every table mapped above. */
err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

/* Full teardown of all ICM tables mapped by mlx4_init_icm(), in reverse
 * order of initialization, followed by the aux area itself.
 */
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

/* Tell the master this slave function is going away (comm-channel RESET).
 * Failure is only logged — there is nothing else we can do at exit.
 */
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

/* Create a write-combining mapping of the blue-flame region, which lives
 * in BAR 2 after the UAR pages.  Returns -ENXIO if the device has no BF
 * registers, -ENOMEM if the WC mapping fails.
 */
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

/* Release the blue-flame WC mapping, if one was created. */
static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

/* Read the 64-bit free-running HW clock from the mapped clock page.
 * The high word is read twice around the low word and the loop retries
 * (up to 10 times) so a consistent hi/lo pair is returned even if the
 * counter wraps between reads.
 */
cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);


/* ioremap the internal clock page advertised by QUERY_FW. */
static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

/* Undo map_internal_clock(), if the mapping exists. */
static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

/* Shut the HCA down: slaves detach via the comm channel, the PF issues
 * CLOSE_HCA and frees the ICM tables.
 */
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

/* Unmap and free the FW area — PF only; slaves never mapped it. */
static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

/* Bring up the slave side of the comm channel: reset the channel, verify
 * the command interface revision against the master, then hand the master
 * the VHCR DMA address 16 bits at a time.  Defers probe while the PF is
 * still loading or mid-FLR.
 */
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if
 (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
		MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

	/* Publish the VHCR DMA address to the master, high bits first. */
	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	/* Best-effort channel reset on any failure; the result is ignored. */
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

/* Master-side view of per-port GID/P_Key table lengths as seen by the PF
 * when acting as a paravirtualized master.
 */
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		/* last P_Key entry is reserved */
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

/* Pick the smallest MGM log entry size whose entry can hold qp_per_entry
 * QPs; returns -1 if even the maximum size is insufficient.
 */
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	/* NOTE(review): this initializer is redundant — i is immediately
	 * reassigned by the for loop below.
	 */
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		/* each 16-byte chunk holds 4 QPNs; 2 chunks are header */
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}

/* Human-readable name for a DMFS high-rate steering mode value. */
static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
{
	switch (dmfs_high_steer_mode) {
	case MLX4_STEERING_DMFS_A0_DEFAULT:
		return "default performance";

	case MLX4_STEERING_DMFS_A0_DYNAMIC:
		return "dynamic hybrid mode";

	case MLX4_STEERING_DMFS_A0_STATIC:
		return "performance optimized for limited rule configuration (static)";

	case MLX4_STEERING_DMFS_A0_DISABLE:
		return "disabled performance optimized steering";

	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
		return "performance optimized steering not supported";

	default:
		return "Unrecognized mode";
	}
}

#define MLX4_DMFS_A0_STEERING			(1UL << 2)

/* Select the multicast steering mode (device-managed FS, B0 or A0) based
 * on FW capabilities and the log_num_mgm_entry_size module parameter.
 * A non-positive modparam value is a bitmask of DMFS options; bit 2
 * requests A0 static high-rate steering.
 */
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size <= 0) {
		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
				mlx4_err(dev, "DMFS high rate mode not supported\n");
			else
				dev->caps.dmfs_high_steer_mode =
					MLX4_STEERING_DMFS_A0_STATIC;
		}
	}

	/* Device-managed flow steering requires FS_EN plus enough QPs per
	 * MGM entry to cover the PF and all VFs.
	 */
	if (mlx4_log_num_mgm_entry_size <= 0 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
		/* B0 needs both UC and MC VEP steering flags; else fall
		 * back to A0.
		 */
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}

/* VXLAN offload is only usable with device-managed flow steering. */
static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ?
"vxlan" : "none"); 1754 } 1755 1756 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 1757 { 1758 int i; 1759 struct mlx4_port_cap port_cap; 1760 1761 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1762 return -EINVAL; 1763 1764 for (i = 1; i <= dev->caps.num_ports; i++) { 1765 if (mlx4_dev_port(dev, i, &port_cap)) { 1766 mlx4_err(dev, 1767 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 1768 } else if ((dev->caps.dmfs_high_steer_mode != 1769 MLX4_STEERING_DMFS_A0_DEFAULT) && 1770 (port_cap.dmfs_optimized_state == 1771 !!(dev->caps.dmfs_high_steer_mode == 1772 MLX4_STEERING_DMFS_A0_DISABLE))) { 1773 mlx4_err(dev, 1774 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 1775 dmfs_high_rate_steering_mode_str( 1776 dev->caps.dmfs_high_steer_mode), 1777 (port_cap.dmfs_optimized_state ? 1778 "enabled" : "disabled")); 1779 } 1780 } 1781 1782 return 0; 1783 } 1784 1785 static int mlx4_init_fw(struct mlx4_dev *dev) 1786 { 1787 struct mlx4_mod_stat_cfg mlx4_cfg; 1788 int err = 0; 1789 1790 if (!mlx4_is_slave(dev)) { 1791 err = mlx4_QUERY_FW(dev); 1792 if (err) { 1793 if (err == -EACCES) 1794 mlx4_info(dev, "non-primary physical function, skipping\n"); 1795 else 1796 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 1797 return err; 1798 } 1799 1800 err = mlx4_load_fw(dev); 1801 if (err) { 1802 mlx4_err(dev, "Failed to start FW, aborting\n"); 1803 return err; 1804 } 1805 1806 mlx4_cfg.log_pg_sz_m = 1; 1807 mlx4_cfg.log_pg_sz = 0; 1808 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1809 if (err) 1810 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1811 } 1812 1813 return err; 1814 } 1815 1816 static int mlx4_init_hca(struct mlx4_dev *dev) 1817 { 1818 struct mlx4_priv *priv = mlx4_priv(dev); 1819 struct mlx4_adapter adapter; 1820 struct mlx4_dev_cap dev_cap; 1821 struct mlx4_profile profile; 1822 struct mlx4_init_hca_param init_hca; 1823 u64 icm_size; 1824 struct 
 mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			return err;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
		    mlx4_is_master(dev))
			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;

		err = mlx4_get_phys_port_id(dev);
		if (err)
			/* non-fatal: continue without physical port ids */
			mlx4_err(dev, "Fail to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		/* mlx4_make_profile() returns the total ICM size, or a
		 * negative errno encoded in the u64.
		 */
		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			return err;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		/* uar_page_sz is encoded as log2(page size) - 12 */
		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			return err;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		/* FW that manages system EQs reports the real EQ/UAR
		 * reservations only via QUERY_FUNC after INIT_HCA.
		 */
		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_close;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
			if (mlx4_validate_optimized_steering(dev))
				mlx4_warn(dev, "Optimized steering validation failed\n");

			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE) {
				dev->caps.dmfs_high_rate_qpn_base =
					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
				dev->caps.dmfs_high_rate_qpn_range =
					MLX4_A0_STEERING_TABLE_SIZE;
			}

			mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode));
		}
	} else {
		/* Slave path: attach to the master, then fetch the caps
		 * delegated to this function.
		 */
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/*Only the master set the ports, all the rest got it from it.*/
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -ENOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	/* Slave path allocated the special-QP arrays in mlx4_slave_cap(). */
	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
1992 kfree(dev->caps.qp0_tunnel); 1993 kfree(dev->caps.qp0_proxy); 1994 kfree(dev->caps.qp1_tunnel); 1995 kfree(dev->caps.qp1_proxy); 1996 } 1997 1998 err_close: 1999 if (mlx4_is_slave(dev)) 2000 mlx4_slave_exit(dev); 2001 else 2002 mlx4_CLOSE_HCA(dev, 0); 2003 2004 err_free_icm: 2005 if (!mlx4_is_slave(dev)) 2006 mlx4_free_icms(dev); 2007 2008 return err; 2009 } 2010 2011 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2012 { 2013 struct mlx4_priv *priv = mlx4_priv(dev); 2014 int nent; 2015 2016 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2017 return -ENOENT; 2018 2019 nent = dev->caps.max_counters; 2020 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 2021 } 2022 2023 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2024 { 2025 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2026 } 2027 2028 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2029 { 2030 struct mlx4_priv *priv = mlx4_priv(dev); 2031 2032 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2033 return -ENOENT; 2034 2035 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2036 if (*idx == -1) 2037 return -ENOMEM; 2038 2039 return 0; 2040 } 2041 2042 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2043 { 2044 u64 out_param; 2045 int err; 2046 2047 if (mlx4_is_mfunc(dev)) { 2048 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2049 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2050 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2051 if (!err) 2052 *idx = get_param_l(&out_param); 2053 2054 return err; 2055 } 2056 return __mlx4_counter_alloc(dev, idx); 2057 } 2058 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2059 2060 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2061 { 2062 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2063 return; 2064 } 2065 2066 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2067 { 2068 u64 in_param = 0; 2069 2070 if (mlx4_is_mfunc(dev)) { 2071 set_param_l(&in_param, idx); 2072 
/* Bring up all HCA software resource tables in dependency order:
 * UAR -> PD -> XRCD -> MR -> (master-only: MCG, MAD demux) -> EQ ->
 * event-driven commands -> IRQ sanity test -> CQ -> SRQ -> QP ->
 * counters -> (master-only: per-port IB caps and SET_PORT).
 * The error ladder at the bottom unwinds in exactly the reverse order;
 * every goto target must stay matched with its setup step.
 */
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	/* Map the driver's own UAR page so the kernel can ring doorbells. */
	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	/* Steering tables and MAD demux are owned by the PF/master only. */
	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	/* NOP generates an interrupt; proves the IRQ path actually works. */
	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	/* -ENOENT just means the HW has no counters; not fatal. */
	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
		goto err_qp_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;
				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			/* Smaller MTU when multi-function shares the port. */
			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
/* Try to switch the device to MSI-X.  Requests one vector per online
 * CPU per port plus the legacy block, capped by the EQs the firmware
 * actually exposes.  On any failure falls back to a single shared INTx
 * vector (the no_msi path).  Sets MLX4_FLAG_MSI_X only on success.
 */
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;

	if (msi_x) {
		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;

		/* Can't use more EQs than the firmware leaves us. */
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		/* May grant anywhere between 2 and nreq vectors. */
		nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);

		if (nreq < 0) {
			kfree(entries);
			goto no_msi;
		} else if (nreq < MSIX_LEGACY_SZ +
			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/*Working in legacy mode , all EQ's shared*/
			dev->caps.comp_pool        = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			/* Enough vectors for a dedicated per-port pool. */
			dev->caps.comp_pool        = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	/* INTx fallback: one completion vector, both EQ slots share the
	 * PCI legacy IRQ line. */
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
port); 2349 info->port = -1; 2350 } 2351 2352 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2353 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2354 if (mlx4_is_mfunc(dev)) 2355 info->port_mtu_attr.attr.mode = S_IRUGO; 2356 else { 2357 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2358 info->port_mtu_attr.store = set_port_ib_mtu; 2359 } 2360 info->port_mtu_attr.show = show_port_ib_mtu; 2361 sysfs_attr_init(&info->port_mtu_attr.attr); 2362 2363 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); 2364 if (err) { 2365 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2366 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2367 info->port = -1; 2368 } 2369 2370 return err; 2371 } 2372 2373 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2374 { 2375 if (info->port < 0) 2376 return; 2377 2378 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2379 device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); 2380 } 2381 2382 static int mlx4_init_steering(struct mlx4_dev *dev) 2383 { 2384 struct mlx4_priv *priv = mlx4_priv(dev); 2385 int num_entries = dev->caps.num_ports; 2386 int i, j; 2387 2388 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2389 if (!priv->steer) 2390 return -ENOMEM; 2391 2392 for (i = 0; i < num_entries; i++) 2393 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2394 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2395 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2396 } 2397 return 0; 2398 } 2399 2400 static void mlx4_clear_steering(struct mlx4_dev *dev) 2401 { 2402 struct mlx4_priv *priv = mlx4_priv(dev); 2403 struct mlx4_steer_index *entry, *tmp_entry; 2404 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2405 int num_entries = dev->caps.num_ports; 2406 int i, j; 2407 2408 for (i = 0; i < num_entries; i++) { 2409 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2410 list_for_each_entry_safe(pqp, tmp_pqp, 2411 &priv->steer[i].promisc_qps[j], 2412 list) { 2413 
list_del(&pqp->list); 2414 kfree(pqp); 2415 } 2416 list_for_each_entry_safe(entry, tmp_entry, 2417 &priv->steer[i].steer_entries[j], 2418 list) { 2419 list_del(&entry->list); 2420 list_for_each_entry_safe(pqp, tmp_pqp, 2421 &entry->duplicates, 2422 list) { 2423 list_del(&pqp->list); 2424 kfree(pqp); 2425 } 2426 kfree(entry); 2427 } 2428 } 2429 } 2430 kfree(priv->steer); 2431 } 2432 2433 static int extended_func_num(struct pci_dev *pdev) 2434 { 2435 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 2436 } 2437 2438 #define MLX4_OWNER_BASE 0x8069c 2439 #define MLX4_OWNER_SIZE 4 2440 2441 static int mlx4_get_ownership(struct mlx4_dev *dev) 2442 { 2443 void __iomem *owner; 2444 u32 ret; 2445 2446 if (pci_channel_offline(dev->pdev)) 2447 return -EIO; 2448 2449 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 2450 MLX4_OWNER_SIZE); 2451 if (!owner) { 2452 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2453 return -ENOMEM; 2454 } 2455 2456 ret = readl(owner); 2457 iounmap(owner); 2458 return (int) !!ret; 2459 } 2460 2461 static void mlx4_free_ownership(struct mlx4_dev *dev) 2462 { 2463 void __iomem *owner; 2464 2465 if (pci_channel_offline(dev->pdev)) 2466 return; 2467 2468 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 2469 MLX4_OWNER_SIZE); 2470 if (!owner) { 2471 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2472 return; 2473 } 2474 writel(0, owner); 2475 msleep(1000); 2476 iounmap(owner); 2477 } 2478 2479 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 2480 !!((flags) & MLX4_FLAG_MASTER)) 2481 2482 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2483 u8 total_vfs, int existing_vfs) 2484 { 2485 u64 dev_flags = dev->flags; 2486 int err = 0; 2487 2488 atomic_inc(&pf_loading); 2489 if (dev->flags & MLX4_FLAG_SRIOV) { 2490 if (existing_vfs != total_vfs) { 2491 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 2492 
existing_vfs, total_vfs); 2493 total_vfs = existing_vfs; 2494 } 2495 } 2496 2497 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 2498 if (NULL == dev->dev_vfs) { 2499 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2500 goto disable_sriov; 2501 } 2502 2503 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 2504 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 2505 err = pci_enable_sriov(pdev, total_vfs); 2506 } 2507 if (err) { 2508 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 2509 err); 2510 goto disable_sriov; 2511 } else { 2512 mlx4_warn(dev, "Running in master mode\n"); 2513 dev_flags |= MLX4_FLAG_SRIOV | 2514 MLX4_FLAG_MASTER; 2515 dev_flags &= ~MLX4_FLAG_SLAVE; 2516 dev->num_vfs = total_vfs; 2517 } 2518 return dev_flags; 2519 2520 disable_sriov: 2521 atomic_dec(&pf_loading); 2522 dev->num_vfs = 0; 2523 kfree(dev->dev_vfs); 2524 return dev_flags & ~MLX4_FLAG_MASTER; 2525 } 2526 2527 enum { 2528 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 2529 }; 2530 2531 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 2532 int *nvfs) 2533 { 2534 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 2535 /* Checking for 64 VFs as a limitation of CX2 */ 2536 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 2537 requested_vfs >= 64) { 2538 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 2539 requested_vfs); 2540 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 2541 } 2542 return 0; 2543 } 2544 2545 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2546 int total_vfs, int *nvfs, struct mlx4_priv *priv) 2547 { 2548 struct mlx4_dev *dev; 2549 unsigned sum = 0; 2550 int err; 2551 int port; 2552 int i; 2553 struct mlx4_dev_cap *dev_cap = NULL; 2554 int existing_vfs = 0; 2555 2556 dev = &priv->dev; 2557 2558 INIT_LIST_HEAD(&priv->ctx_list); 2559 spin_lock_init(&priv->ctx_lock); 2560 2561 mutex_init(&priv->port_mutex); 2562 2563 
/* Core bring-up of one HCA instance.
 *
 * Handles three personalities: plain PF, SR-IOV master, and VF/slave.
 * A PF first claims device ownership and resets the HCA; a function
 * that discovers mid-init it is not the primary PF (init_hca returns
 * -EACCES) demotes itself to slave and re-enters at slave_start.
 * Legacy (non-SYS_EQS) firmware additionally forces an early SR-IOV
 * enable + reset + re-entry, because QUERY_DEV_CAP results differ once
 * SR-IOV is on.
 *
 * The error ladder at the bottom unwinds strictly in reverse order of
 * acquisition; do not reorder statements here without matching the
 * labels.
 */
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
{
	struct mlx4_dev *dev;
	unsigned sum = 0;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				return -EINVAL;
			}
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			dev->flags = MLX4_FLAG_MASTER;
			/* VFs may already exist from a previous probe. */
			existing_vfs = pci_num_vf(pdev);
			if (existing_vfs)
				dev->flags |= MLX4_FLAG_SRIOV;
			dev->num_vfs = total_vfs;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands.  Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;

		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap already initialized */
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);

			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				/* Legacy FW: SR-IOV must be on before caps are
				 * queried; enable it, reset, start over. */
				u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
								  existing_vfs);

				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev))
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

	/* SYS_EQS firmware: SR-IOV is enabled only now, after init_hca. */
	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user in case of PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		int ib_ports = 0;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_ports++;

		/* Per-port VF syntax is Ethernet-only. */
		if (ib_ports &&
		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
			mlx4_err(dev,
				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
			err = -EINVAL;
			goto err_close;
		}
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));

		/* Record each VF's port span: slot 0/1 = single port,
		 * slot 2 = dual-port VFs. */
		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
							    dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	err = mlx4_setup_hca(dev);
	/* -EBUSY under MSI-X suggests the IRQ test failed; retry on INTx. */
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
		pci_disable_sriov(pdev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}
/* Probe-side PCI plumbing for one device: enables the device, parses
 * and validates the num_vfs/probe_vf module parameters (param_map
 * translates argument position to the per-port/dual-port nvfs slots),
 * checks the DCS (BAR0) and UAR (BAR2) regions, sets DMA masks, skips
 * VFs the PF was not asked to probe, then hands off to mlx4_load_one().
 * Returns 0 or a negative errno; undoes PCI state on failure.
 */
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	/* Row = number of module-param arguments - 1; maps the i-th
	 * argument to its nvfs slot (0/1 = per-port, 2 = both ports). */
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since their are
	 * 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		/* NOTE(review): num_vfs[] is uint8_t, so nvfs values are
		 * 0..255 and this check can never fire; it also indexes
		 * nvfs[i] rather than the slot just written -- looks like
		 * dead/defensive code, confirm before relying on it. */
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	/* Per-port total = single-port VFs (slot i) + dual-port VFs (slot 2). */
	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA, fall back to 32-bit. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			/* Walk the nvfs slots to find which range this VF's
			 * extended function number falls in. */
			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err)
		goto err_release_regions;
	return 0;

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
3029 */ 3030 if (total_vfs) { 3031 unsigned vfs_offset = 0; 3032 3033 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3034 vfs_offset + nvfs[i] < extended_func_num(pdev); 3035 vfs_offset += nvfs[i], i++) 3036 ; 3037 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3038 err = -ENODEV; 3039 goto err_release_regions; 3040 } 3041 if ((extended_func_num(pdev) - vfs_offset) 3042 > prb_vf[i]) { 3043 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3044 extended_func_num(pdev)); 3045 err = -ENODEV; 3046 goto err_release_regions; 3047 } 3048 } 3049 } 3050 3051 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); 3052 if (err) 3053 goto err_release_regions; 3054 return 0; 3055 3056 err_release_regions: 3057 pci_release_regions(pdev); 3058 3059 err_disable_pdev: 3060 pci_disable_device(pdev); 3061 pci_set_drvdata(pdev, NULL); 3062 return err; 3063 } 3064 3065 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3066 { 3067 struct mlx4_priv *priv; 3068 struct mlx4_dev *dev; 3069 int ret; 3070 3071 printk_once(KERN_INFO "%s", mlx4_version); 3072 3073 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3074 if (!priv) 3075 return -ENOMEM; 3076 3077 dev = &priv->dev; 3078 dev->pdev = pdev; 3079 pci_set_drvdata(pdev, dev); 3080 priv->pci_dev_data = id->driver_data; 3081 3082 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3083 if (ret) 3084 kfree(priv); 3085 3086 return ret; 3087 } 3088 3089 static void mlx4_unload_one(struct pci_dev *pdev) 3090 { 3091 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3092 struct mlx4_priv *priv = mlx4_priv(dev); 3093 int pci_dev_data; 3094 int p; 3095 int active_vfs = 0; 3096 3097 if (priv->removed) 3098 return; 3099 3100 pci_dev_data = priv->pci_dev_data; 3101 3102 /* Disabling SR-IOV is not allowed while there are active vf's */ 3103 if (mlx4_is_master(dev)) { 3104 active_vfs = mlx4_how_many_lives_vf(dev); 3105 if (active_vfs) { 3106 pr_warn("Removing PF when there are active VF's !!\n"); 3107 pr_warn("Will not disable 
SR-IOV.\n"); 3108 } 3109 } 3110 mlx4_stop_sense(dev); 3111 mlx4_unregister_device(dev); 3112 3113 for (p = 1; p <= dev->caps.num_ports; p++) { 3114 mlx4_cleanup_port_info(&priv->port[p]); 3115 mlx4_CLOSE_PORT(dev, p); 3116 } 3117 3118 if (mlx4_is_master(dev)) 3119 mlx4_free_resource_tracker(dev, 3120 RES_TR_FREE_SLAVES_ONLY); 3121 3122 mlx4_cleanup_counters_table(dev); 3123 mlx4_cleanup_qp_table(dev); 3124 mlx4_cleanup_srq_table(dev); 3125 mlx4_cleanup_cq_table(dev); 3126 mlx4_cmd_use_polling(dev); 3127 mlx4_cleanup_eq_table(dev); 3128 mlx4_cleanup_mcg_table(dev); 3129 mlx4_cleanup_mr_table(dev); 3130 mlx4_cleanup_xrcd_table(dev); 3131 mlx4_cleanup_pd_table(dev); 3132 3133 if (mlx4_is_master(dev)) 3134 mlx4_free_resource_tracker(dev, 3135 RES_TR_FREE_STRUCTS_ONLY); 3136 3137 iounmap(priv->kar); 3138 mlx4_uar_free(dev, &priv->driver_uar); 3139 mlx4_cleanup_uar_table(dev); 3140 if (!mlx4_is_slave(dev)) 3141 mlx4_clear_steering(dev); 3142 mlx4_free_eq_table(dev); 3143 if (mlx4_is_master(dev)) 3144 mlx4_multi_func_cleanup(dev); 3145 mlx4_close_hca(dev); 3146 mlx4_close_fw(dev); 3147 if (mlx4_is_slave(dev)) 3148 mlx4_multi_func_cleanup(dev); 3149 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3150 3151 if (dev->flags & MLX4_FLAG_MSI_X) 3152 pci_disable_msix(pdev); 3153 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 3154 mlx4_warn(dev, "Disabling SR-IOV\n"); 3155 pci_disable_sriov(pdev); 3156 dev->flags &= ~MLX4_FLAG_SRIOV; 3157 dev->num_vfs = 0; 3158 } 3159 3160 if (!mlx4_is_slave(dev)) 3161 mlx4_free_ownership(dev); 3162 3163 kfree(dev->caps.qp0_qkey); 3164 kfree(dev->caps.qp0_tunnel); 3165 kfree(dev->caps.qp0_proxy); 3166 kfree(dev->caps.qp1_tunnel); 3167 kfree(dev->caps.qp1_proxy); 3168 kfree(dev->dev_vfs); 3169 3170 memset(priv, 0, sizeof(*priv)); 3171 priv->pci_dev_data = pci_dev_data; 3172 priv->removed = 1; 3173 } 3174 3175 static void mlx4_remove_one(struct pci_dev *pdev) 3176 { 3177 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3178 struct mlx4_priv *priv = 
mlx4_priv(dev); 3179 3180 mlx4_unload_one(pdev); 3181 pci_release_regions(pdev); 3182 pci_disable_device(pdev); 3183 kfree(priv); 3184 pci_set_drvdata(pdev, NULL); 3185 } 3186 3187 int mlx4_restart_one(struct pci_dev *pdev) 3188 { 3189 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3190 struct mlx4_priv *priv = mlx4_priv(dev); 3191 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3192 int pci_dev_data, err, total_vfs; 3193 3194 pci_dev_data = priv->pci_dev_data; 3195 total_vfs = dev->num_vfs; 3196 memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs)); 3197 3198 mlx4_unload_one(pdev); 3199 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); 3200 if (err) { 3201 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3202 __func__, pci_name(pdev), err); 3203 return err; 3204 } 3205 3206 return err; 3207 } 3208 3209 static const struct pci_device_id mlx4_pci_table[] = { 3210 /* MT25408 "Hermon" SDR */ 3211 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3212 /* MT25408 "Hermon" DDR */ 3213 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3214 /* MT25408 "Hermon" QDR */ 3215 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3216 /* MT25408 "Hermon" DDR PCIe gen2 */ 3217 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3218 /* MT25408 "Hermon" QDR PCIe gen2 */ 3219 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3220 /* MT25408 "Hermon" EN 10GigE */ 3221 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3222 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 3223 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3224 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 3225 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3226 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 3227 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3228 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 3229 { PCI_VDEVICE(MELLANOX, 0x6764), 
MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3230 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 3231 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3232 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 3233 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3234 /* MT25400 Family [ConnectX-2 Virtual Function] */ 3235 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, 3236 /* MT27500 Family [ConnectX-3] */ 3237 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 3238 /* MT27500 Family [ConnectX-3 Virtual Function] */ 3239 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, 3240 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 3241 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 3242 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 3243 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 3244 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 3245 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 3246 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 3247 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 3248 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 3249 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 3250 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 3251 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 3252 { 0, } 3253 }; 3254 3255 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 3256 3257 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3258 pci_channel_state_t state) 3259 { 3260 mlx4_unload_one(pdev); 3261 3262 return state == pci_channel_io_perm_failure ? 3263 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 3264 } 3265 3266 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3267 { 3268 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3269 struct mlx4_priv *priv = mlx4_priv(dev); 3270 int ret; 3271 3272 ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv); 3273 3274 return ret ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3275 } 3276 3277 static const struct pci_error_handlers mlx4_err_handler = { 3278 .error_detected = mlx4_pci_err_detected, 3279 .slot_reset = mlx4_pci_slot_reset, 3280 }; 3281 3282 static struct pci_driver mlx4_driver = { 3283 .name = DRV_NAME, 3284 .id_table = mlx4_pci_table, 3285 .probe = mlx4_init_one, 3286 .shutdown = mlx4_unload_one, 3287 .remove = mlx4_remove_one, 3288 .err_handler = &mlx4_err_handler, 3289 }; 3290 3291 static int __init mlx4_verify_params(void) 3292 { 3293 if ((log_num_mac < 0) || (log_num_mac > 7)) { 3294 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 3295 return -1; 3296 } 3297 3298 if (log_num_vlan != 0) 3299 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 3300 MLX4_LOG_NUM_VLANS); 3301 3302 if (use_prio != 0) 3303 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 3304 3305 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 3306 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 3307 log_mtts_per_seg); 3308 return -1; 3309 } 3310 3311 /* Check if module param for ports type has legal combination */ 3312 if (port_type_array[0] == false && port_type_array[1] == true) { 3313 pr_warn("Module parameter configuration ETH/IB is not supported. 
Switching to default configuration IB/IB\n"); 3314 port_type_array[0] = true; 3315 } 3316 3317 if (mlx4_log_num_mgm_entry_size < -7 || 3318 (mlx4_log_num_mgm_entry_size > 0 && 3319 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 3320 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 3321 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 3322 mlx4_log_num_mgm_entry_size, 3323 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 3324 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 3325 return -1; 3326 } 3327 3328 return 0; 3329 } 3330 3331 static int __init mlx4_init(void) 3332 { 3333 int ret; 3334 3335 if (mlx4_verify_params()) 3336 return -EINVAL; 3337 3338 mlx4_catas_init(); 3339 3340 mlx4_wq = create_singlethread_workqueue("mlx4"); 3341 if (!mlx4_wq) 3342 return -ENOMEM; 3343 3344 ret = pci_register_driver(&mlx4_driver); 3345 if (ret < 0) 3346 destroy_workqueue(mlx4_wq); 3347 return ret < 0 ? ret : 0; 3348 } 3349 3350 static void __exit mlx4_cleanup(void) 3351 { 3352 pci_unregister_driver(&mlx4_driver); 3353 destroy_workqueue(mlx4_wq); 3354 } 3355 3356 module_init(mlx4_init); 3357 module_exit(mlx4_cleanup); 3358