/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of VFs to be probed by the PF driver (num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log MGM entry size, which defines the "
					 "number of QPs per MCG (e.g. 10 gives 248); "
					 "range: 9 <= log_num_mgm_entry_size <= 12");

#define MLX4_VF			(1 << 0)

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0
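/*
 * Illustrative invocation of the module parameters above (example
 * values only, not a recommendation; debug_level exists only when
 * CONFIG_MLX4_DEBUG is set):
 *
 *   modprobe mlx4_core debug_level=1 num_vfs=4 probe_vf=1
 *
 * This would enable debug tracing and four virtual functions, with the
 * PF driver probing the first of them.
 */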
static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really the number of MTT segments */
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
			   "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
{
	return dev->caps.reserved_eqs +
		MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
}

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
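/*
 * Note on mlx4_check_port_params() above: on a DPDP-capable HCA a mixed
 * (IB, ETH) layout for ports 1 and 2 is accepted, but the reverse
 * (ETH, IB) ordering is rejected, and each requested type must also be
 * present in the port's supported_type mask.
 */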
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i] = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i] = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i] = dev_cap->wavelength[i];
		dev->caps.trans_code[i] = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
		dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
		dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;

	/* Sense port is always allowed on supported ConnectX-1 and -2 devices */
	if (dev->pdev->device != 0x1003)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported,
			 * assign IB only if SRIOV is off */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB) {
				if (dev->flags & MLX4_FLAG_SRIOV)
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_NONE;
				else
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_IB;
			/* if both IB and ETH are supported,
			 * first of all check if SRIOV is on */
			} else if (dev->flags & MLX4_FLAG_SRIOV)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			else {
				/* In non-SRIOV mode, we set the port type
				 * according to the user's selection; if the
				 * user selected none, take the FW hint */
				if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i-1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO"
		 * mode and perform the sense_port FW command to try and set the
		 * correct port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
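/*
 * Worked example for the ETH_ADDR/FC_ADDR reservation above, assuming
 * the default module parameters on a two-port HCA: log_num_macs = 7,
 * log_num_vlans = 7 and priority steering off give
 * (1 << 7) * (1 << 7) * 1 * 2 = 32768 QPs reserved in each region.
 */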
/* Check how many live VFs there are and return that count. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	/* slave 0 is the PPF, so start counting from 1 */
	for (i = 1; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	int i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* Fail if the HCA has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown HCA global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	memset(&dev_cap, 0, sizeof(dev_cap));
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* The slave gets its UAR page size from the QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown PF context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->caps.num_qps = func_cap.qp_quota;
	dev->caps.num_srqs = func_cap.srq_quota;
	dev->caps.num_cqs = func_cap.cq_quota;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.num_mpts = func_cap.mpt_quota;
	dev->caps.num_mtts = func_cap.mtt_quota;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}
	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

#if 0
	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
		  dev->caps.num_uars, dev->caps.reserved_uars,
		  dev->caps.uar_page_size * dev->caps.num_uars,
		  pci_resource_len(dev->pdev, 2));
	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
		  dev->caps.reserved_eqs);
	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
		  dev->caps.num_pds, dev->caps.reserved_pds,
		  dev->caps.slave_pd_shift, dev->caps.pd_base);
#endif
	return 0;
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1]) {
			change = 1;
			dev->caps.port_type[port + 1] = port_types[port];
		}
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
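/*
 * The attribute below is written through sysfs; for example (the PCI
 * address is hypothetical):
 *
 *   echo eth > /sys/bus/pci/devices/0000:04:00.0/mlx4_port1
 *
 * switches port 1 to Ethernet; "ib" and "auto" are handled the same way.
 */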
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			 "Set only 'eth' or 'ib' for both ports "
			 "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified; no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
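/*
 * The cMPT area mapped below is carved into four consecutive regions,
 * one per cMPT type (QP, SRQ, CQ, EQ); each region starts at
 * cmpt_base + ((u64) (type * cmpt_entry_sz) << MLX4_CMPT_SHIFT).
 */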
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}
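	/*
	 * Worked example for the reserved-MTT alignment performed just
	 * below (hypothetical numbers): nine reserved 64-byte entries on
	 * a machine with 128-byte cachelines give
	 * ALIGN(9 * 64, 128) / 64 = 10 reserved entries.
	 */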
	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries.  (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size.)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}
	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	down(&priv->cmd.slave_sem);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	up(&priv->cmd.slave_sem);
}

static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	down(&priv->cmd.slave_sem);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before giving up. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR, retrying... "
					  "(try num: %d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported "
			 "by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;
	up(&priv->cmd.slave_sem);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	up(&priv->cmd.slave_sem);
	return -EIO;
}
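/*
 * Note on the handshake above: the 64-bit VHCR DMA address is handed to
 * the master in 16-bit chunks, most significant first -- VHCR0 carries
 * bits 63:48, VHCR1 bits 47:32, VHCR2 bits 31:16 -- and VHCR_EN passes
 * the low bits while enabling the channel.
 */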
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			goto unmap_bf;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			goto unmap_bf;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		profile = default_profile;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize slave\n");
			goto unmap_bf;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the others get them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
unmap_bf:
	unmap_bf_area(dev);
	return err;
}

static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
	return;
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);

static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}
	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize "
				 "multicast group table, aborting.\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}
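	/*
	 * -ENOENT above only means the device lacks the counters
	 * capability (see mlx4_init_counters_table()), so it is
	 * deliberately not treated as a fatal error here.
	 */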
	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			err = mlx4_check_ext_port_caps(dev, port);
			if (err)
				mlx4_warn(dev, "failed to get port %d extended "
					  "port capabilities support info (%d)."
					  " Assuming not supported\n",
					  port, err);

			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
		/* In multifunction mode each function gets 2 MSI-X vectors,
		 * one for data path completions and the other for async events
		 * or command completions */
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs are shared */
			dev->caps.comp_pool = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
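/*
 * Note on the vector accounting above: when fewer than
 * MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT vectors are granted, the
 * driver falls back to the legacy scheme (comp_pool = 0, all completion
 * EQs shared); otherwise the first MSIX_LEGACY_SZ vectors serve the
 * legacy EQs and the remainder form the per-port completion pool.
 */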
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		info->base_qpn =
			dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store = set_port_type;
	}
	info->port_attr.show = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}

static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
		INIT_LIST_HEAD(&priv->steer[i].high_prios);
	}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}
	if (num_vfs > MLX4_MAX_NUM_VF) {
		printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
		       num_vfs, MLX4_MAX_NUM_VF);
		return -EINVAL;
	}
	/*
	 * Check for BARs.
	 */
	if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting. "
			"(id == 0x%p, id->driver_data: 0x%lx, "
			"pci_resource_flags(pdev, 0): 0x%lx)\n", id,
			id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	/* Detect if this device is a virtual function */
	if (id && id->driver_data & MLX4_VF) {
		/* When acting as a PF, we normally skip VFs unless explicitly
		 * requested to probe them. */
		if (num_vfs && extended_func_num(pdev) > probe_vf) {
			mlx4_warn(dev, "Skipping virtual function:%d\n",
				  extended_func_num(pdev));
			err = -ENODEV;
			goto err_free_dev;
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership of the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported."
					  " Skipping PF.\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}
		if (num_vfs) {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
			err = pci_enable_sriov(pdev, num_vfs);
			if (err) {
				mlx4_err(dev, "Failed to enable SR-IOV, "
					 "continuing without SR-IOV enabled "
					 "(err = %d).\n", err);
				num_vfs = 0;
				err = 0;
			} else {
				mlx4_warn(dev, "Running in master mode\n");
				dev->flags |= MLX4_FLAG_SRIOV |
					      MLX4_FLAG_MASTER;
				dev->num_vfs = num_vfs;
			}
		}

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			goto err_rel_own;
		}
	}

slave_start:
	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands.  Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			if (mlx4_multi_func_init(dev)) {
				mlx4_err(dev, "Failed to init slave mfunc "
					 "interface, aborting.\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (mlx4_multi_func_init(dev)) {
			mlx4_err(dev, "Failed to init master mfunc "
				 "interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	spin_lock_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		mlx4_err(dev, "INTx is not supported in multi-function mode, "
			 "aborting.\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_free_eq;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
		pci_disable_sriov(pdev);

err_rel_own:
	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}
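/*
 * The removal path below tears the device down in roughly the reverse
 * order of __mlx4_init_one() and mlx4_setup_hca(), after first warning
 * if any VFs are still active.
 */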
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		/* In SR-IOV mode it is not allowed to unload the PF's
		 * driver while there are live VFs */
		if (mlx4_is_master(dev)) {
			if (mlx4_how_many_lives_vf(dev))
				printk(KERN_ERR "Removing PF while VFs are still assigned!\n");
		}
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_xrcd_table(dev);
		mlx4_cleanup_pd_table(dev);

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		if (!mlx4_is_slave(dev))
			mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		if (mlx4_is_master(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_close_hca(dev);
		if (mlx4_is_slave(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
			mlx4_warn(dev, "Disabling SR-IOV\n");
			pci_disable_sriov(pdev);
		}

		if (!mlx4_is_slave(dev))
			mlx4_free_ownership(dev);
		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);
	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check if the module param for the port types has a legal combination */
	if (port_type_array[0] == MLX4_PORT_TYPE_NONE &&
	    port_type_array[1] == MLX4_PORT_TYPE_IB) {
		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = MLX4_PORT_TYPE_IB;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);