1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

/* Driver-wide workqueue; definition here, used by other mlx4 core files. */
struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

/* Runtime-writable (0644) so debug tracing can be toggled via sysfs. */
int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

/* Without MSI support msi_x degenerates to the constant 0. */
#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

/* Per-port VF counts: [0]=port1, [1]=port2, [2]=dual-port VFs. */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

/* Per-port counts of VFs the PF driver itself should probe. */
static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248.range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

/* All PF context behaviour flags the slave-side code understands. */
#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

/* Default ICM resource sizing used when memory is plentiful. */
static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segements */
};

/* Shrunken resource sizing selected via mlx4_low_memory_profile(). */
static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

/* Per-port requested link type; MLX4_PORT_TYPE_NONE = take the FW hint. */
static int
port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 156 static int arr_argc = 2; 157 module_param_array(port_type_array, int, &arr_argc, 0444); 158 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 159 "1 for IB, 2 for Ethernet"); 160 161 struct mlx4_port_config { 162 struct list_head list; 163 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 164 struct pci_dev *pdev; 165 }; 166 167 static atomic_t pf_loading = ATOMIC_INIT(0); 168 169 int mlx4_check_port_params(struct mlx4_dev *dev, 170 enum mlx4_port_type *port_type) 171 { 172 int i; 173 174 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 175 for (i = 0; i < dev->caps.num_ports - 1; i++) { 176 if (port_type[i] != port_type[i + 1]) { 177 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 178 return -EINVAL; 179 } 180 } 181 } 182 183 for (i = 0; i < dev->caps.num_ports; i++) { 184 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 185 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 186 i + 1); 187 return -EINVAL; 188 } 189 } 190 return 0; 191 } 192 193 static void mlx4_set_port_mask(struct mlx4_dev *dev) 194 { 195 int i; 196 197 for (i = 1; i <= dev->caps.num_ports; ++i) 198 dev->caps.port_mask[i] = dev->caps.port_type[i]; 199 } 200 201 enum { 202 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 203 }; 204 205 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 206 { 207 int err = 0; 208 struct mlx4_func func; 209 210 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 211 err = mlx4_QUERY_FUNC(dev, &func, 0); 212 if (err) { 213 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 214 return err; 215 } 216 dev_cap->max_eqs = func.max_eq; 217 dev_cap->reserved_eqs = func.rsvd_eqs; 218 dev_cap->reserved_uars = func.rsvd_uars; 219 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 220 } 221 return err; 222 } 223 224 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 225 { 226 struct mlx4_caps 
	*dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

/*
 * Copy one port's QUERY_PORT results into dev->caps / dev->phys_caps.
 * Always returns 0 (kept int for symmetry with the other cap helpers).
 */
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

/* Thin wrapper around the QUERY_PORT FW command with error logging. */
static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

#define MLX4_A0_STEERING_TABLE_SIZE	256

/*
 * Query device capabilities from FW and populate dev->caps.
 * Returns 0 on success, negative errno if the FW command fails or the
 * reported capabilities are incompatible with this kernel/PCI setup.
 */
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	/* XRC domains exist only when the XRC capability flag is set */
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			/* SENSE_PORT result is advisory; keep the old type on NONE */
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		/* Clamp the module parameters to what this port supports */
		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	/* High-rate steering QPN range applies only with DMFS (FS_EN) */
	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ?
			      MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;
	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	return 0;
}

/*
 * Read the device's own PCIe speed/width capability from LNKCAP/LNKCAP2.
 * Outputs are set to *_UNKNOWN first and only overwritten on success;
 * returns 0 when both were determined, otherwise a negative errno.
 */
static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}

/*
 * Warn (best effort, no failure) when the slot/chain provides less PCIe
 * bandwidth than the device is capable of.
 */
static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}

/*The function checks if there are live vf, return the num of them*/
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__,
				  i);
			ret++;
		}
	}
	return ret;
}

/*
 * Map a proxy/tunnel special QP number to its paravirtualized QKey.
 * Returns 0 and fills *qkey, or -EINVAL when qpn is outside the
 * [base_proxy_sqpn, base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX) window.
 */
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

/* Record a slave's virtual->physical pkey mapping (master only, no-op otherwise). */
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

/* Store a slave's node GUID (master only, no-op otherwise). */
void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

/* Fetch a slave's node GUID; returns 0 when not the master function. */
__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

/* Returns 1 if the given slave is marked active, 0 otherwise (or non-master). */
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

/*
 * Adopt the steering mode the PF selected (reported via QUERY_HCA) and
 * derive the per-MCG QP count from it.
 */
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

/*
 * Initialize dev->caps for a slave (VF/multi-function) device by querying
 * the HCA/FW and the PF via QUERY_HCA, QUERY_DEV_CAP and QUERY_FUNC_CAP.
 * Returns 0 on success or a negative errno.
 */
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	/* QUERY_FW failure is non-fatal here: only the FW version is lost */
	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	/* reject any PF behaviour flag this driver does not understand */
	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	/* per-port special-QP tables; freed together under err_mem */
	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
810 811 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 812 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || 813 !dev->caps.qp0_qkey) { 814 err = -ENOMEM; 815 goto err_mem; 816 } 817 818 for (i = 1; i <= dev->caps.num_ports; ++i) { 819 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 820 if (err) { 821 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 822 i, err); 823 goto err_mem; 824 } 825 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 826 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 827 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 828 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 829 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 830 dev->caps.port_mask[i] = dev->caps.port_type[i]; 831 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 832 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 833 &dev->caps.gid_table_len[i], 834 &dev->caps.pkey_table_len[i])) 835 goto err_mem; 836 } 837 838 if (dev->caps.uar_page_size * (dev->caps.num_uars - 839 dev->caps.reserved_uars) > 840 pci_resource_len(dev->pdev, 2)) { 841 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 842 dev->caps.uar_page_size * dev->caps.num_uars, 843 (unsigned long long) pci_resource_len(dev->pdev, 2)); 844 goto err_mem; 845 } 846 847 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 848 dev->caps.eqe_size = 64; 849 dev->caps.eqe_factor = 1; 850 } else { 851 dev->caps.eqe_size = 32; 852 dev->caps.eqe_factor = 0; 853 } 854 855 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 856 dev->caps.cqe_size = 64; 857 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 858 } else { 859 dev->caps.cqe_size = 32; 860 } 861 862 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 863 dev->caps.eqe_size = hca_param.eqe_size; 864 dev->caps.eqe_factor = 0; 865 } 866 867 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 868 
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still need to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	/* Timestamping capability is masked off for all slave functions */
	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

	return 0;

err_mem:
	/* free all five special-QP tables and NULL them so a later
	 * cleanup path cannot double-free */
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}

/*
 * Asynchronously request the protocol drivers (mlx4_en / mlx4_ib) that
 * match the configured port types.
 */
static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	/* IBoE means mlx4_ib may be wanted even with only ETH ports */
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
926 */ 927 int mlx4_change_port_types(struct mlx4_dev *dev, 928 enum mlx4_port_type *port_types) 929 { 930 int err = 0; 931 int change = 0; 932 int port; 933 934 for (port = 0; port < dev->caps.num_ports; port++) { 935 /* Change the port type only if the new type is different 936 * from the current, and not set to Auto */ 937 if (port_types[port] != dev->caps.port_type[port + 1]) 938 change = 1; 939 } 940 if (change) { 941 mlx4_unregister_device(dev); 942 for (port = 1; port <= dev->caps.num_ports; port++) { 943 mlx4_CLOSE_PORT(dev, port); 944 dev->caps.port_type[port] = port_types[port - 1]; 945 err = mlx4_SET_PORT(dev, port, -1); 946 if (err) { 947 mlx4_err(dev, "Failed to set port %d, aborting\n", 948 port); 949 goto out; 950 } 951 } 952 mlx4_set_port_mask(dev); 953 err = mlx4_register_device(dev); 954 if (err) { 955 mlx4_err(dev, "Failed to register device\n"); 956 goto out; 957 } 958 mlx4_request_modules(dev); 959 } 960 961 out: 962 return err; 963 } 964 965 static ssize_t show_port_type(struct device *dev, 966 struct device_attribute *attr, 967 char *buf) 968 { 969 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 970 port_attr); 971 struct mlx4_dev *mdev = info->dev; 972 char type[8]; 973 974 sprintf(type, "%s", 975 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 
976 "ib" : "eth"); 977 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 978 sprintf(buf, "auto (%s)\n", type); 979 else 980 sprintf(buf, "%s\n", type); 981 982 return strlen(buf); 983 } 984 985 static ssize_t set_port_type(struct device *dev, 986 struct device_attribute *attr, 987 const char *buf, size_t count) 988 { 989 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 990 port_attr); 991 struct mlx4_dev *mdev = info->dev; 992 struct mlx4_priv *priv = mlx4_priv(mdev); 993 enum mlx4_port_type types[MLX4_MAX_PORTS]; 994 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 995 static DEFINE_MUTEX(set_port_type_mutex); 996 int i; 997 int err = 0; 998 999 mutex_lock(&set_port_type_mutex); 1000 1001 if (!strcmp(buf, "ib\n")) 1002 info->tmp_type = MLX4_PORT_TYPE_IB; 1003 else if (!strcmp(buf, "eth\n")) 1004 info->tmp_type = MLX4_PORT_TYPE_ETH; 1005 else if (!strcmp(buf, "auto\n")) 1006 info->tmp_type = MLX4_PORT_TYPE_AUTO; 1007 else { 1008 mlx4_err(mdev, "%s is not supported port type\n", buf); 1009 err = -EINVAL; 1010 goto err_out; 1011 } 1012 1013 mlx4_stop_sense(mdev); 1014 mutex_lock(&priv->port_mutex); 1015 /* Possible type is always the one that was delivered */ 1016 mdev->caps.possible_type[info->port] = info->tmp_type; 1017 1018 for (i = 0; i < mdev->caps.num_ports; i++) { 1019 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 1020 mdev->caps.possible_type[i+1]; 1021 if (types[i] == MLX4_PORT_TYPE_AUTO) 1022 types[i] = mdev->caps.port_type[i+1]; 1023 } 1024 1025 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 1026 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 1027 for (i = 1; i <= mdev->caps.num_ports; i++) { 1028 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 1029 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 1030 err = -EINVAL; 1031 } 1032 } 1033 } 1034 if (err) { 1035 mlx4_err(mdev, "Auto sensing is not supported on this HCA. 
Set only 'eth' or 'ib' for both ports (should be the same)\n"); 1036 goto out; 1037 } 1038 1039 mlx4_do_sense_ports(mdev, new_types, types); 1040 1041 err = mlx4_check_port_params(mdev, new_types); 1042 if (err) 1043 goto out; 1044 1045 /* We are about to apply the changes after the configuration 1046 * was verified, no need to remember the temporary types 1047 * any more */ 1048 for (i = 0; i < mdev->caps.num_ports; i++) 1049 priv->port[i + 1].tmp_type = 0; 1050 1051 err = mlx4_change_port_types(mdev, new_types); 1052 1053 out: 1054 mlx4_start_sense(mdev); 1055 mutex_unlock(&priv->port_mutex); 1056 err_out: 1057 mutex_unlock(&set_port_type_mutex); 1058 1059 return err ? err : count; 1060 } 1061 1062 enum ibta_mtu { 1063 IB_MTU_256 = 1, 1064 IB_MTU_512 = 2, 1065 IB_MTU_1024 = 3, 1066 IB_MTU_2048 = 4, 1067 IB_MTU_4096 = 5 1068 }; 1069 1070 static inline int int_to_ibta_mtu(int mtu) 1071 { 1072 switch (mtu) { 1073 case 256: return IB_MTU_256; 1074 case 512: return IB_MTU_512; 1075 case 1024: return IB_MTU_1024; 1076 case 2048: return IB_MTU_2048; 1077 case 4096: return IB_MTU_4096; 1078 default: return -1; 1079 } 1080 } 1081 1082 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 1083 { 1084 switch (mtu) { 1085 case IB_MTU_256: return 256; 1086 case IB_MTU_512: return 512; 1087 case IB_MTU_1024: return 1024; 1088 case IB_MTU_2048: return 2048; 1089 case IB_MTU_4096: return 4096; 1090 default: return -1; 1091 } 1092 } 1093 1094 static ssize_t show_port_ib_mtu(struct device *dev, 1095 struct device_attribute *attr, 1096 char *buf) 1097 { 1098 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1099 port_mtu_attr); 1100 struct mlx4_dev *mdev = info->dev; 1101 1102 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 1103 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1104 1105 sprintf(buf, "%d\n", 1106 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 1107 return strlen(buf); 1108 } 1109 1110 static ssize_t 
set_port_ib_mtu(struct device *dev, 1111 struct device_attribute *attr, 1112 const char *buf, size_t count) 1113 { 1114 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1115 port_mtu_attr); 1116 struct mlx4_dev *mdev = info->dev; 1117 struct mlx4_priv *priv = mlx4_priv(mdev); 1118 int err, port, mtu, ibta_mtu = -1; 1119 1120 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 1121 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1122 return -EINVAL; 1123 } 1124 1125 err = kstrtoint(buf, 0, &mtu); 1126 if (!err) 1127 ibta_mtu = int_to_ibta_mtu(mtu); 1128 1129 if (err || ibta_mtu < 0) { 1130 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 1131 return -EINVAL; 1132 } 1133 1134 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 1135 1136 mlx4_stop_sense(mdev); 1137 mutex_lock(&priv->port_mutex); 1138 mlx4_unregister_device(mdev); 1139 for (port = 1; port <= mdev->caps.num_ports; port++) { 1140 mlx4_CLOSE_PORT(mdev, port); 1141 err = mlx4_SET_PORT(mdev, port, -1); 1142 if (err) { 1143 mlx4_err(mdev, "Failed to set port %d, aborting\n", 1144 port); 1145 goto err_set_port; 1146 } 1147 } 1148 err = mlx4_register_device(mdev); 1149 err_set_port: 1150 mutex_unlock(&priv->port_mutex); 1151 mlx4_start_sense(mdev); 1152 return err ? 
	       err : count;
}

/*
 * Allocate ICM for the firmware image, map it (MAP_FA) and start the
 * firmware (RUN_FW).  On failure the FA mapping and ICM are released.
 */
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

/*
 * Set up the four cMPT (context MPT) ICM tables: QP, SRQ, CQ and EQ.
 * Each table lives at cmpt_base plus a per-type offset derived from
 * MLX4_CMPT_SHIFT.  Tables are torn down in reverse order on failure.
 */
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	/* All physical EQs are reserved for / owned by the driver. */
	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

/*
 * Size and map all ICM (InfiniHost Context Memory) tables the HCA
 * needs: aux area, cMPTs, EQ/MTT/MPT/QP/AUXC/ALTC/RDMARC/CQ/SRQ/MCG
 * contexts.  Unwinds everything already mapped on any failure.
 */
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}


	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW
	 * will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	/* RDMARC table is scaled by rdmarc_shift (responder resources). */
	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

	/* Unwind strictly in reverse order of the mappings above. */
err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

/*
 * Tear down every ICM table mapped by mlx4_init_icm(), in exact
 * reverse order of creation, then unmap and free the aux ICM area.
 */
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

/* Tell the master (via the comm channel) that this slave is going away. */
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

/*
 * Map the blue-flame region (the part of BAR 2 that follows the UAR
 * pages) write-combined.  Returns -ENXIO when BF is unsupported.
 */
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

/*
 * Read the 64-bit free-running HW clock.  The high word is read twice
 * around the low word and the read is retried (up to 10 times) until
 * both high reads agree, to guard against a carry between the reads.
 */
cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);


/* ioremap the HW clock registers (BAR/offset reported by QUERY_FW). */
static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

/* Undo mlx4_init_hca(): slaves detach via comm channel, the PF closes
 * the HCA and frees its ICM. */
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

/* Undo mlx4_init_fw(); a no-op for slaves, which never loaded FW. */
static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

/*
 * Bring up the slave side of the comm channel: reset, verify the
 * channel interface revision against the master, then hand the master
 * the VHCR DMA address 16 bits at a time.  Returns 0, -EPROBE_DEFER
 * (PF not ready / mid-FLR) or -EIO.
 */
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if
		   (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

	/* Deliver the 64-bit VHCR DMA address in 16-bit chunks, MSB first. */
	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
						     MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
						     MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
						     MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	/* Best-effort reset; the channel is in an unknown state here. */
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

/* Master (PF) view of per-port GID/PKEY table lengths in paravirtualized
 * (multi-function) operation. */
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		/* last pkey entry is reserved */
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

/*
 * Pick the smallest MGM entry log-size that can hold qp_per_entry QPs.
 * Returns the log size, or -1 if even the maximum entry is too small.
 */
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}

/* Human-readable name for a DMFS high-rate steering mode (log output). */
static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
{
	switch (dmfs_high_steer_mode) {
	case MLX4_STEERING_DMFS_A0_DEFAULT:
		return "default performance";

	case MLX4_STEERING_DMFS_A0_DYNAMIC:
		return "dynamic hybrid mode";

	case MLX4_STEERING_DMFS_A0_STATIC:
		return "performance optimized for limited rule configuration (static)";

	case MLX4_STEERING_DMFS_A0_DISABLE:
		return "disabled performance optimized steering";

	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
		return "performance optimized steering not supported";

	default:
		return "Unrecognized mode";
	}
}

/* Flag bit carried in a negative log_num_mgm_entry_size modparam value. */
#define MLX4_DMFS_A0_STEERING			(1UL << 2)

/*
 * Select the steering mode: device-managed flow steering (DMFS) when
 * the FW supports it and the modparam allows it, otherwise B0 (needs
 * both UC and MC VEP steering flags) or A0 as a last resort.  Also
 * resolves the DMFS high-rate (A0) sub-mode requested via modparam.
 */
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size <= 0) {
		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
				mlx4_err(dev, "DMFS high rate mode not supported\n");
			else
				dev->caps.dmfs_high_steer_mode =
					MLX4_STEERING_DMFS_A0_STATIC;
		}
	}

	if (mlx4_log_num_mgm_entry_size <= 0 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}

/* VXLAN offload requires DMFS and is incompatible with the static
 * DMFS high-rate (A0) mode. */
static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
	    dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ?
"vxlan" : "none"); 1755 } 1756 1757 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 1758 { 1759 int i; 1760 struct mlx4_port_cap port_cap; 1761 1762 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1763 return -EINVAL; 1764 1765 for (i = 1; i <= dev->caps.num_ports; i++) { 1766 if (mlx4_dev_port(dev, i, &port_cap)) { 1767 mlx4_err(dev, 1768 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 1769 } else if ((dev->caps.dmfs_high_steer_mode != 1770 MLX4_STEERING_DMFS_A0_DEFAULT) && 1771 (port_cap.dmfs_optimized_state == 1772 !!(dev->caps.dmfs_high_steer_mode == 1773 MLX4_STEERING_DMFS_A0_DISABLE))) { 1774 mlx4_err(dev, 1775 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 1776 dmfs_high_rate_steering_mode_str( 1777 dev->caps.dmfs_high_steer_mode), 1778 (port_cap.dmfs_optimized_state ? 1779 "enabled" : "disabled")); 1780 } 1781 } 1782 1783 return 0; 1784 } 1785 1786 static int mlx4_init_fw(struct mlx4_dev *dev) 1787 { 1788 struct mlx4_mod_stat_cfg mlx4_cfg; 1789 int err = 0; 1790 1791 if (!mlx4_is_slave(dev)) { 1792 err = mlx4_QUERY_FW(dev); 1793 if (err) { 1794 if (err == -EACCES) 1795 mlx4_info(dev, "non-primary physical function, skipping\n"); 1796 else 1797 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 1798 return err; 1799 } 1800 1801 err = mlx4_load_fw(dev); 1802 if (err) { 1803 mlx4_err(dev, "Failed to start FW, aborting\n"); 1804 return err; 1805 } 1806 1807 mlx4_cfg.log_pg_sz_m = 1; 1808 mlx4_cfg.log_pg_sz = 0; 1809 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1810 if (err) 1811 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1812 } 1813 1814 return err; 1815 } 1816 1817 static int mlx4_init_hca(struct mlx4_dev *dev) 1818 { 1819 struct mlx4_priv *priv = mlx4_priv(dev); 1820 struct mlx4_adapter adapter; 1821 struct mlx4_dev_cap dev_cap; 1822 struct mlx4_profile profile; 1823 struct mlx4_init_hca_param init_hca; 1824 u64 icm_size; 1825 struct 
	       mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			goto err_stop_fw;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
		    mlx4_is_master(dev))
			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;

		err = mlx4_get_phys_port_id(dev);
		/* non-fatal: continue without physical port ids */
		if (err)
			mlx4_err(dev, "Fail to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		/* mlx4_make_profile() fills init_hca and returns the total
		 * ICM size (negative value doubles as an error code). */
		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		/* FW expects UAR page size as log2(page) - 12 (4K base). */
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
1889 goto err_stop_fw; 1890 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 1891 dev->caps.num_eqs = dev_cap.max_eqs; 1892 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 1893 dev->caps.reserved_uars = dev_cap.reserved_uars; 1894 } 1895 } 1896 1897 /* 1898 * If TS is supported by FW 1899 * read HCA frequency by QUERY_HCA command 1900 */ 1901 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 1902 memset(&init_hca, 0, sizeof(init_hca)); 1903 err = mlx4_QUERY_HCA(dev, &init_hca); 1904 if (err) { 1905 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 1906 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1907 } else { 1908 dev->caps.hca_core_clock = 1909 init_hca.hca_core_clock; 1910 } 1911 1912 /* In case we got HCA frequency 0 - disable timestamping 1913 * to avoid dividing by zero 1914 */ 1915 if (!dev->caps.hca_core_clock) { 1916 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1917 mlx4_err(dev, 1918 "HCA frequency is 0 - timestamping is not supported\n"); 1919 } else if (map_internal_clock(dev)) { 1920 /* 1921 * Map internal clock, 1922 * in case of failure disable timestamping 1923 */ 1924 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1925 mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); 1926 } 1927 } 1928 1929 if (dev->caps.dmfs_high_steer_mode != 1930 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 1931 if (mlx4_validate_optimized_steering(dev)) 1932 mlx4_warn(dev, "Optimized steering validation failed\n"); 1933 1934 if (dev->caps.dmfs_high_steer_mode == 1935 MLX4_STEERING_DMFS_A0_DISABLE) { 1936 dev->caps.dmfs_high_rate_qpn_base = 1937 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 1938 dev->caps.dmfs_high_rate_qpn_range = 1939 MLX4_A0_STEERING_TABLE_SIZE; 1940 } 1941 1942 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 1943 dmfs_high_rate_steering_mode_str( 1944 dev->caps.dmfs_high_steer_mode)); 1945 } 1946 } else { 1947 err = mlx4_init_slave(dev); 1948 if (err) { 1949 if (err != -EPROBE_DEFER) 1950 mlx4_err(dev, "Failed to initialize slave\n"); 1951 return err; 1952 } 1953 1954 err = mlx4_slave_cap(dev); 1955 if (err) { 1956 mlx4_err(dev, "Failed to obtain slave caps\n"); 1957 goto err_close; 1958 } 1959 } 1960 1961 if (map_bf_area(dev)) 1962 mlx4_dbg(dev, "Failed to map blue flame area\n"); 1963 1964 /*Only the master set the ports, all the rest got it from it.*/ 1965 if (!mlx4_is_slave(dev)) 1966 mlx4_set_port_mask(dev); 1967 1968 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1969 if (err) { 1970 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 1971 goto unmap_bf; 1972 } 1973 1974 /* Query CONFIG_DEV parameters */ 1975 err = mlx4_config_dev_retrieval(dev, ¶ms); 1976 if (err && err != -ENOTSUPP) { 1977 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 1978 } else if (!err) { 1979 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 1980 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 1981 } 1982 priv->eq_table.inta_pin = adapter.inta_pin; 1983 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 1984 1985 return 0; 1986 1987 unmap_bf: 1988 unmap_internal_clock(dev); 1989 unmap_bf_area(dev); 1990 1991 if (mlx4_is_slave(dev)) { 1992 kfree(dev->caps.qp0_qkey); 
	/* (tail of mlx4_init_hca()'s error unwind -- the function opens above
	 * this view; on a slave, free the proxy/tunnel QP arrays allocated by
	 * mlx4_slave_cap()) */
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	/* Only the PF owns the firmware area mapping and the FW ICM. */
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}

/* Set up the allocator bitmap for HW flow counters (one bit per counter).
 * Returns -ENOENT when the device does not expose counters at all.
 */
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

/* Release the counters allocator bitmap. */
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

/* Allocate one counter index directly from the local bitmap (PF / native
 * path).  Returns -ENOENT if counters are unsupported, -ENOMEM when the
 * bitmap is exhausted.
 */
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	/* mlx4_bitmap_alloc() signals exhaustion with -1; *idx is u32 so the
	 * comparison relies on the usual arithmetic conversion of -1. */
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

/* Public counter allocation: multi-function devices route the request to
 * the resource-tracking command wrapper; native devices allocate locally.
 */
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

/* Return a counter index to the local bitmap (round-robin reuse policy). */
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
	return;
}

/* Public counter free: mirrors mlx4_counter_alloc() -- wrapped FREE_RES
 * command on multi-function devices, direct bitmap free otherwise.
 */
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		/* Best-effort: the command's return value is ignored here. */
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);

/* Bring up all SW/HW resource tables (UAR, PD, XRCD, MR, MCG, EQ, CQ, SRQ,
 * QP, counters) and configure the ports.  Each step unwinds through the
 * goto ladder at the bottom of the function on failure.
 */
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	/* Map the driver's own UAR page so the kernel can ring doorbells. */
	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	/* Multicast steering and MAD demux are owned by the PF only. */
	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

2144 err = mlx4_init_eq_table(dev); 2145 if (err) { 2146 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2147 goto err_mcg_table_free; 2148 } 2149 2150 err = mlx4_cmd_use_events(dev); 2151 if (err) { 2152 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2153 goto err_eq_table_free; 2154 } 2155 2156 err = mlx4_NOP(dev); 2157 if (err) { 2158 if (dev->flags & MLX4_FLAG_MSI_X) { 2159 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2160 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2161 mlx4_warn(dev, "Trying again without MSI-X\n"); 2162 } else { 2163 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2164 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2165 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2166 } 2167 2168 goto err_cmd_poll; 2169 } 2170 2171 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2172 2173 err = mlx4_init_cq_table(dev); 2174 if (err) { 2175 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2176 goto err_cmd_poll; 2177 } 2178 2179 err = mlx4_init_srq_table(dev); 2180 if (err) { 2181 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2182 goto err_cq_table_free; 2183 } 2184 2185 err = mlx4_init_qp_table(dev); 2186 if (err) { 2187 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2188 goto err_srq_table_free; 2189 } 2190 2191 err = mlx4_init_counters_table(dev); 2192 if (err && err != -ENOENT) { 2193 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2194 goto err_qp_table_free; 2195 } 2196 2197 if (!mlx4_is_slave(dev)) { 2198 for (port = 1; port <= dev->caps.num_ports; port++) { 2199 ib_port_default_caps = 0; 2200 err = mlx4_get_port_ib_caps(dev, port, 2201 &ib_port_default_caps); 2202 if (err) 2203 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). 
Continuing with caps = 0\n", 2204 port, err); 2205 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2206 2207 /* initialize per-slave default ib port capabilities */ 2208 if (mlx4_is_master(dev)) { 2209 int i; 2210 for (i = 0; i < dev->num_slaves; i++) { 2211 if (i == mlx4_master_func_num(dev)) 2212 continue; 2213 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2214 ib_port_default_caps; 2215 } 2216 } 2217 2218 if (mlx4_is_mfunc(dev)) 2219 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2220 else 2221 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2222 2223 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2224 dev->caps.pkey_table_len[port] : -1); 2225 if (err) { 2226 mlx4_err(dev, "Failed to set port %d, aborting\n", 2227 port); 2228 goto err_counters_table_free; 2229 } 2230 } 2231 } 2232 2233 return 0; 2234 2235 err_counters_table_free: 2236 mlx4_cleanup_counters_table(dev); 2237 2238 err_qp_table_free: 2239 mlx4_cleanup_qp_table(dev); 2240 2241 err_srq_table_free: 2242 mlx4_cleanup_srq_table(dev); 2243 2244 err_cq_table_free: 2245 mlx4_cleanup_cq_table(dev); 2246 2247 err_cmd_poll: 2248 mlx4_cmd_use_polling(dev); 2249 2250 err_eq_table_free: 2251 mlx4_cleanup_eq_table(dev); 2252 2253 err_mcg_table_free: 2254 if (!mlx4_is_slave(dev)) 2255 mlx4_cleanup_mcg_table(dev); 2256 2257 err_mr_table_free: 2258 mlx4_cleanup_mr_table(dev); 2259 2260 err_xrcd_table_free: 2261 mlx4_cleanup_xrcd_table(dev); 2262 2263 err_pd_table_free: 2264 mlx4_cleanup_pd_table(dev); 2265 2266 err_kar_unmap: 2267 iounmap(priv->kar); 2268 2269 err_uar_free: 2270 mlx4_uar_free(dev, &priv->driver_uar); 2271 2272 err_uar_table_free: 2273 mlx4_cleanup_uar_table(dev); 2274 return err; 2275 } 2276 2277 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2278 { 2279 struct mlx4_priv *priv = mlx4_priv(dev); 2280 struct msix_entry *entries; 2281 int i; 2282 2283 if (msi_x) { 2284 int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; 2285 2286 nreq = min_t(int, dev->caps.num_eqs - 
dev->caps.reserved_eqs, 2287 nreq); 2288 2289 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2290 if (!entries) 2291 goto no_msi; 2292 2293 for (i = 0; i < nreq; ++i) 2294 entries[i].entry = i; 2295 2296 nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq); 2297 2298 if (nreq < 0) { 2299 kfree(entries); 2300 goto no_msi; 2301 } else if (nreq < MSIX_LEGACY_SZ + 2302 dev->caps.num_ports * MIN_MSIX_P_PORT) { 2303 /*Working in legacy mode , all EQ's shared*/ 2304 dev->caps.comp_pool = 0; 2305 dev->caps.num_comp_vectors = nreq - 1; 2306 } else { 2307 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 2308 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 2309 } 2310 for (i = 0; i < nreq; ++i) 2311 priv->eq_table.eq[i].irq = entries[i].vector; 2312 2313 dev->flags |= MLX4_FLAG_MSI_X; 2314 2315 kfree(entries); 2316 return; 2317 } 2318 2319 no_msi: 2320 dev->caps.num_comp_vectors = 1; 2321 dev->caps.comp_pool = 0; 2322 2323 for (i = 0; i < 2; ++i) 2324 priv->eq_table.eq[i].irq = dev->pdev->irq; 2325 } 2326 2327 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2328 { 2329 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2330 int err = 0; 2331 2332 info->dev = dev; 2333 info->port = port; 2334 if (!mlx4_is_slave(dev)) { 2335 mlx4_init_mac_table(dev, &info->mac_table); 2336 mlx4_init_vlan_table(dev, &info->vlan_table); 2337 mlx4_init_roce_gid_table(dev, &info->gid_table); 2338 info->base_qpn = mlx4_get_base_qpn(dev, port); 2339 } 2340 2341 sprintf(info->dev_name, "mlx4_port%d", port); 2342 info->port_attr.attr.name = info->dev_name; 2343 if (mlx4_is_mfunc(dev)) 2344 info->port_attr.attr.mode = S_IRUGO; 2345 else { 2346 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2347 info->port_attr.store = set_port_type; 2348 } 2349 info->port_attr.show = show_port_type; 2350 sysfs_attr_init(&info->port_attr.attr); 2351 2352 err = device_create_file(&dev->pdev->dev, &info->port_attr); 2353 if (err) { 2354 mlx4_err(dev, "Failed to create file for port %d\n", 
port); 2355 info->port = -1; 2356 } 2357 2358 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2359 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2360 if (mlx4_is_mfunc(dev)) 2361 info->port_mtu_attr.attr.mode = S_IRUGO; 2362 else { 2363 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2364 info->port_mtu_attr.store = set_port_ib_mtu; 2365 } 2366 info->port_mtu_attr.show = show_port_ib_mtu; 2367 sysfs_attr_init(&info->port_mtu_attr.attr); 2368 2369 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); 2370 if (err) { 2371 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2372 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2373 info->port = -1; 2374 } 2375 2376 return err; 2377 } 2378 2379 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2380 { 2381 if (info->port < 0) 2382 return; 2383 2384 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2385 device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); 2386 } 2387 2388 static int mlx4_init_steering(struct mlx4_dev *dev) 2389 { 2390 struct mlx4_priv *priv = mlx4_priv(dev); 2391 int num_entries = dev->caps.num_ports; 2392 int i, j; 2393 2394 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2395 if (!priv->steer) 2396 return -ENOMEM; 2397 2398 for (i = 0; i < num_entries; i++) 2399 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2400 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2401 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2402 } 2403 return 0; 2404 } 2405 2406 static void mlx4_clear_steering(struct mlx4_dev *dev) 2407 { 2408 struct mlx4_priv *priv = mlx4_priv(dev); 2409 struct mlx4_steer_index *entry, *tmp_entry; 2410 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2411 int num_entries = dev->caps.num_ports; 2412 int i, j; 2413 2414 for (i = 0; i < num_entries; i++) { 2415 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2416 list_for_each_entry_safe(pqp, tmp_pqp, 2417 &priv->steer[i].promisc_qps[j], 2418 list) { 2419 
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				/* each steering entry carries its own list of
				 * duplicate promiscuous QPs */
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

/* Flatten PCI slot/function into a single function index (8 functions
 * per slot), used to identify which VF this device is.
 */
static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

/* BAR0 offset and size of the device-ownership word used to arbitrate
 * which PF drives the HCA. */
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

/* Read the ownership word from BAR0.  Returns 0 when we are the owner,
 * 1 when another function already owns the device, negative errno on
 * PCI-channel/ioremap failure.
 */
static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

/* Clear the ownership word so another function may claim the device.
 * The 1s sleep after the write mirrors the firmware hand-over delay --
 * NOTE(review): exact rationale for the 1000ms value is not visible here.
 */
static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

/* A consistent flag state has SRIOV and MASTER either both set or both
 * clear. */
#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
				  !!((flags) & MLX4_FLAG_MASTER))

/* Enable SR-IOV for total_vfs VFs (unless it is already enabled) and
 * return the updated flag word.  Takes a pf_loading reference that the
 * caller's success/error paths drop.  On failure, returns the original
 * flags with MASTER cleared and leaves dev->dev_vfs freed/NULL-ish state
 * for the caller's unwind (kfree of a NULL pointer is a no-op).
 */
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs)
{
	u64 dev_flags = dev->flags;
	int err = 0;

	atomic_inc(&pf_loading);
	if (dev->flags & MLX4_FLAG_SRIOV) {
		if (existing_vfs != total_vfs) {
			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
				 existing_vfs, total_vfs);
			/* keep what the HW already has enabled */
			total_vfs = existing_vfs;
		}
	}

	dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	}

	/* Only enable in HW if it was not already on when we got here. */
	if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
		err = pci_enable_sriov(pdev, total_vfs);
	}
	if (err) {
		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
			 err);
		goto disable_sriov;
	} else {
		mlx4_warn(dev, "Running in master mode\n");
		dev_flags |= MLX4_FLAG_SRIOV |
			MLX4_FLAG_MASTER;
		dev_flags &= ~MLX4_FLAG_SLAVE;
		dev->num_vfs = total_vfs;
	}
	return dev_flags;

disable_sriov:
	atomic_dec(&pf_loading);
	dev->num_vfs = 0;
	kfree(dev->dev_vfs);
	return dev_flags & ~MLX4_FLAG_MASTER;
}

enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};

/* Reject VF counts the firmware cannot support: without the 80-VF
 * capability (ConnectX-2) at most 63 VFs + PF fit.
 */
static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}
	return 0;
}

/* Core bring-up of one mlx4 device: claim ownership (PF), reset the HCA,
 * initialize FW/command interfaces, possibly enable SR-IOV, then build all
 * resource tables and register the device.  May restart itself via the
 * slave_start label when the function turns out to be a slave.
 */
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
{
	struct mlx4_dev *dev;
	unsigned sum = 0;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				return -EINVAL;
			}
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			dev->flags = MLX4_FLAG_MASTER;
			/* VFs may survive a driver reload; remember them so we
			 * neither re-enable nor tear down SR-IOV behind them */
			existing_vfs = pci_num_vf(pdev);
			if (existing_vfs)
				dev->flags |= MLX4_FLAG_SRIOV;
			dev->num_vfs = total_vfs;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;

		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap already initialized */
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);

			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			/* Legacy (non-SYS_EQS) FW: SR-IOV must be enabled and
			 * the HCA reset BEFORE re-querying caps, so restart
			 * the whole sequence from slave_start. */
			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
								  existing_vfs);

				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev))
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			/* retry the whole init as a slave */
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

	/* New (SYS_EQS) FW: SR-IOV can be enabled after INIT_HCA. */
	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user in case of PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		int ib_ports = 0;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_ports++;

		if (ib_ports &&
		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
			mlx4_err(dev,
				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
			err = -EINVAL;
			goto err_close;
		}
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));

		/* Assign each VF slot its port span: slot 0 -> port 1,
		 * slot 1 -> port 2, slot 2 -> both ports. */
		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	err = mlx4_setup_hca(dev);
	/* -EBUSY from setup under MSI-X on a native device means the IRQ
	 * test failed; retry once in INTx mode. */
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	/* drop the pf_loading reference taken in mlx4_enable_sriov() */
	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

	/* slave-side proxy/tunnel QP arrays allocated by mlx4_slave_cap() */
	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	/* do not tear down SR-IOV that was already on before we loaded */
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
		pci_disable_sriov(pdev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}

/* PCI-level probe work: enable the device, validate the num_vfs/probe_vf
 * module parameters, check BARs, claim regions, set DMA masks and then
 * hand off to mlx4_load_one().  On a VF, decides whether this VF should be
 * probed by the PF driver at all.
 */
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	/* maps i-th module-parameter value to its slot {port1, port2, both},
	 * depending on how many values the user passed */
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since their are
	 * 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		/* NOTE(review): num_vfs[] is u8 so this can never trigger;
		 * it also tests slot i rather than the param_map-mapped slot
		 * that was just assigned -- confirm intent. */
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		/* NOTE(review): same unmapped-index concern as above -- the
		 * prb_vf > nvfs comparison uses slot i, not the mapped slot,
		 * so a too-large probe_vf may go undetected when argc == 1;
		 * verify against upstream. */
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	/* slot 2 counts dual-port VFs, so each port sees nvfs[i] + nvfs[2] */
	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			/* locate which per-port VF range this VF falls into */
			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err)
		goto err_release_regions;
	return 0;

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

/* pci_driver.probe entry point: allocate the private structure, link it to
 * the pci_dev and run __mlx4_init_one().
 */
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);
	priv->pci_dev_data = id->driver_data;

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		kfree(priv);

	return ret;
}

/* Tear down everything mlx4_load_one() built, in reverse order, but keep
 * priv allocated and PCI regions claimed so the device can be reloaded
 * (mlx4_restart_one) or finally removed (mlx4_remove_one).  Idempotent via
 * priv->removed.
 */
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	int p;
	int active_vfs = 0;

	if (priv->removed)
		return;

	pci_dev_data = priv->pci_dev_data;

	/* Disabling SR-IOV is not allowed while there are active vf's */
	if (mlx4_is_master(dev)) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}
	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
		dev->num_vfs = 0;
	}

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	/* wipe priv but keep the identity needed for a possible reload */
	memset(priv, 0, sizeof(*priv));
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

/* pci_driver.remove entry point: unload, then release PCI resources and
 * free the private structure.
 */
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_unload_one(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}

/* Full unload/reload cycle preserving the VF configuration; used e.g. for
 * port-type changes.  Returns 0 on success or the mlx4_load_one() error.
 */
int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->num_vfs;
	memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	return err;
}

/* PCI IDs served by this driver; driver_data flags mark port-sense-forced
 * devices and virtual functions. */
static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2*/
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

/* AER callback: shut the device down; permanent failure means disconnect,
 * otherwise request a slot reset.
 */
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_unload_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/* AER slot-reset callback: re-run the probe path on the existing priv.
 * (function continues past this view) */
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);

	return ret ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3281 } 3282 3283 static const struct pci_error_handlers mlx4_err_handler = { 3284 .error_detected = mlx4_pci_err_detected, 3285 .slot_reset = mlx4_pci_slot_reset, 3286 }; 3287 3288 static struct pci_driver mlx4_driver = { 3289 .name = DRV_NAME, 3290 .id_table = mlx4_pci_table, 3291 .probe = mlx4_init_one, 3292 .shutdown = mlx4_unload_one, 3293 .remove = mlx4_remove_one, 3294 .err_handler = &mlx4_err_handler, 3295 }; 3296 3297 static int __init mlx4_verify_params(void) 3298 { 3299 if ((log_num_mac < 0) || (log_num_mac > 7)) { 3300 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 3301 return -1; 3302 } 3303 3304 if (log_num_vlan != 0) 3305 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 3306 MLX4_LOG_NUM_VLANS); 3307 3308 if (use_prio != 0) 3309 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 3310 3311 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 3312 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 3313 log_mtts_per_seg); 3314 return -1; 3315 } 3316 3317 /* Check if module param for ports type has legal combination */ 3318 if (port_type_array[0] == false && port_type_array[1] == true) { 3319 pr_warn("Module parameter configuration ETH/IB is not supported. 
Switching to default configuration IB/IB\n"); 3320 port_type_array[0] = true; 3321 } 3322 3323 if (mlx4_log_num_mgm_entry_size < -7 || 3324 (mlx4_log_num_mgm_entry_size > 0 && 3325 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 3326 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 3327 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 3328 mlx4_log_num_mgm_entry_size, 3329 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 3330 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 3331 return -1; 3332 } 3333 3334 return 0; 3335 } 3336 3337 static int __init mlx4_init(void) 3338 { 3339 int ret; 3340 3341 if (mlx4_verify_params()) 3342 return -EINVAL; 3343 3344 mlx4_catas_init(); 3345 3346 mlx4_wq = create_singlethread_workqueue("mlx4"); 3347 if (!mlx4_wq) 3348 return -ENOMEM; 3349 3350 ret = pci_register_driver(&mlx4_driver); 3351 if (ret < 0) 3352 destroy_workqueue(mlx4_wq); 3353 return ret < 0 ? ret : 0; 3354 } 3355 3356 static void __exit mlx4_cleanup(void) 3357 { 3358 pci_unregister_driver(&mlx4_driver); 3359 destroy_workqueue(mlx4_wq); 3360 } 3361 3362 module_init(mlx4_init); 3363 module_exit(mlx4_cleanup); 3364