/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "number of virtual functions to enable; SR-IOV is used only if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of VFs to be probed by the PF driver (requires num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log2 of the MGM entry size, which"
					 " determines the number of QPs per MCG"
					 " (for example, 10 gives 248)."
					 " Range: 9 <= log_num_mgm_entry_size <= 12");

#define MLX4_VF	(1 << 0)

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really the number of MTT segments */
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
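	/*
	 * Per-port limits reported by QUERY_DEV_CAP are cached below.
	 * Ports are numbered from 1 throughout mlx4, so entry 0 of each
	 * per-port array is intentionally unused.
	 */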
dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; 215 for (i = 1; i <= dev->caps.num_ports; ++i) { 216 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 217 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; 218 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 219 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 220 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 221 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 222 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 223 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 224 dev->caps.suggested_type[i] = dev_cap->suggested_type[i]; 225 dev->caps.default_sense[i] = dev_cap->default_sense[i]; 226 dev->caps.trans_type[i] = dev_cap->trans_type[i]; 227 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; 228 dev->caps.wavelength[i] = dev_cap->wavelength[i]; 229 dev->caps.trans_code[i] = dev_cap->trans_code[i]; 230 } 231 232 dev->caps.uar_page_size = PAGE_SIZE; 233 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 234 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 235 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 236 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 237 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 238 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 239 dev->caps.max_wqes = dev_cap->max_qp_sz; 240 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 241 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 242 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 243 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 244 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 245 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 246 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 247 /* 248 * Subtract 1 from the limit because we need to allocate a 249 * spare CQE so the HCA HW can tell the difference between an 250 * empty CQ and a full CQ. 251 */ 252 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 253 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 254 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 255 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 256 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 257 258 /* The first 128 UARs are used for EQ doorbells */ 259 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 260 dev->caps.reserved_pds = dev_cap->reserved_pds; 261 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 262 dev_cap->reserved_xrcds : 0; 263 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 264 dev_cap->max_xrcds : 0; 265 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 266 267 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 268 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 269 dev->caps.flags = dev_cap->flags; 270 dev->caps.flags2 = dev_cap->flags2; 271 dev->caps.bmme_flags = dev_cap->bmme_flags; 272 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 273 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 274 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 275 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 276 277 /* Sense port always allowed on supported devices for ConnectX1 and 2 */ 278 if (dev->pdev->device != 0x1003) 279 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 280 281 dev->caps.log_num_macs = log_num_mac; 282 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 283 dev->caps.log_num_prios = use_prio ? 
						3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported,
			 * assign IB only if SRIOV is off */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB) {
				if (dev->flags & MLX4_FLAG_SRIOV)
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_NONE;
				else
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_IB;
			/* if IB and ETH are supported,
			 * first of all check if SRIOV is on */
			} else if (dev->flags & MLX4_FLAG_SRIOV)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			else {
				/* In non-SRIOV mode, we set the port type
				 * according to user selection of port type;
				 * if the user selected none, take the FW hint */
				if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i-1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform the sense_port FW command to try and set the correct
		 * port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
/* The function checks if there are live VFs and returns how many there are. */
static int
mlx4_how_many_lives_vf(struct mlx4_dev *dev) 376 { 377 struct mlx4_priv *priv = mlx4_priv(dev); 378 struct mlx4_slave_state *s_state; 379 int i; 380 int ret = 0; 381 382 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 383 s_state = &priv->mfunc.master.slave_state[i]; 384 if (s_state->active && s_state->last_cmd != 385 MLX4_COMM_CMD_RESET) { 386 mlx4_warn(dev, "%s: slave: %d is still active\n", 387 __func__, i); 388 ret++; 389 } 390 } 391 return ret; 392 } 393 394 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 395 { 396 struct mlx4_priv *priv = mlx4_priv(dev); 397 struct mlx4_slave_state *s_slave; 398 399 if (!mlx4_is_master(dev)) 400 return 0; 401 402 s_slave = &priv->mfunc.master.slave_state[slave]; 403 return !!s_slave->active; 404 } 405 EXPORT_SYMBOL(mlx4_is_slave_active); 406 407 static int mlx4_slave_cap(struct mlx4_dev *dev) 408 { 409 int err; 410 u32 page_size; 411 struct mlx4_dev_cap dev_cap; 412 struct mlx4_func_cap func_cap; 413 struct mlx4_init_hca_param hca_param; 414 int i; 415 416 memset(&hca_param, 0, sizeof(hca_param)); 417 err = mlx4_QUERY_HCA(dev, &hca_param); 418 if (err) { 419 mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); 420 return err; 421 } 422 423 /*fail if the hca has an unknown capability */ 424 if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != 425 HCA_GLOBAL_CAP_MASK) { 426 mlx4_err(dev, "Unknown hca global capabilities\n"); 427 return -ENOSYS; 428 } 429 430 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 431 432 memset(&dev_cap, 0, sizeof(dev_cap)); 433 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 434 err = mlx4_dev_cap(dev, &dev_cap); 435 if (err) { 436 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 437 return err; 438 } 439 440 err = mlx4_QUERY_FW(dev); 441 if (err) 442 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); 443 444 page_size = ~dev->caps.page_size_cap + 1; 445 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 446 if (page_size > PAGE_SIZE) { 447 mlx4_err(dev, "HCA minimum page size of %d bigger than " 448 "kernel PAGE_SIZE of %ld, aborting.\n", 449 page_size, PAGE_SIZE); 450 return -ENODEV; 451 } 452 453 /* slave gets uar page size from QUERY_HCA fw command */ 454 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 455 456 /* TODO: relax this assumption */ 457 if (dev->caps.uar_page_size != PAGE_SIZE) { 458 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 459 dev->caps.uar_page_size, PAGE_SIZE); 460 return -ENODEV; 461 } 462 463 memset(&func_cap, 0, sizeof(func_cap)); 464 err = mlx4_QUERY_FUNC_CAP(dev, &func_cap); 465 if (err) { 466 mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n"); 467 return err; 468 } 469 470 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 471 PF_CONTEXT_BEHAVIOUR_MASK) { 472 mlx4_err(dev, "Unknown pf context behaviour\n"); 473 return -ENOSYS; 474 } 475 476 dev->caps.num_ports = func_cap.num_ports; 477 dev->caps.num_qps = func_cap.qp_quota; 478 dev->caps.num_srqs = func_cap.srq_quota; 479 dev->caps.num_cqs = func_cap.cq_quota; 480 dev->caps.num_eqs = func_cap.max_eq; 481 dev->caps.reserved_eqs = func_cap.reserved_eq; 482 dev->caps.num_mpts = func_cap.mpt_quota; 483 dev->caps.num_mtts = func_cap.mtt_quota; 484 dev->caps.num_pds = MLX4_NUM_PDS; 485 dev->caps.num_mgms = 0; 486 dev->caps.num_amgms = 0; 487 488 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 489 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 490 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 491 return -ENODEV; 
492 } 493 494 for (i = 1; i <= dev->caps.num_ports; ++i) 495 dev->caps.port_mask[i] = dev->caps.port_type[i]; 496 497 if (dev->caps.uar_page_size * (dev->caps.num_uars - 498 dev->caps.reserved_uars) > 499 pci_resource_len(dev->pdev, 2)) { 500 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " 501 "PCI resource 2 size of 0x%llx, aborting.\n", 502 dev->caps.uar_page_size * dev->caps.num_uars, 503 (unsigned long long) pci_resource_len(dev->pdev, 2)); 504 return -ENODEV; 505 } 506 507 return 0; 508 } 509 510 /* 511 * Change the port configuration of the device. 512 * Every user of this function must hold the port mutex. 513 */ 514 int mlx4_change_port_types(struct mlx4_dev *dev, 515 enum mlx4_port_type *port_types) 516 { 517 int err = 0; 518 int change = 0; 519 int port; 520 521 for (port = 0; port < dev->caps.num_ports; port++) { 522 /* Change the port type only if the new type is different 523 * from the current, and not set to Auto */ 524 if (port_types[port] != dev->caps.port_type[port + 1]) 525 change = 1; 526 } 527 if (change) { 528 mlx4_unregister_device(dev); 529 for (port = 1; port <= dev->caps.num_ports; port++) { 530 mlx4_CLOSE_PORT(dev, port); 531 dev->caps.port_type[port] = port_types[port - 1]; 532 err = mlx4_SET_PORT(dev, port); 533 if (err) { 534 mlx4_err(dev, "Failed to set port %d, " 535 "aborting\n", port); 536 goto out; 537 } 538 } 539 mlx4_set_port_mask(dev); 540 err = mlx4_register_device(dev); 541 } 542 543 out: 544 return err; 545 } 546 547 static ssize_t show_port_type(struct device *dev, 548 struct device_attribute *attr, 549 char *buf) 550 { 551 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 552 port_attr); 553 struct mlx4_dev *mdev = info->dev; 554 char type[8]; 555 556 sprintf(type, "%s", 557 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 558 "ib" : "eth"); 559 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 560 sprintf(buf, "auto (%s)\n", type); 561 else 562 sprintf(buf, "%s\n", type); 563 564 return strlen(buf); 565 } 566 567 static ssize_t set_port_type(struct device *dev, 568 struct device_attribute *attr, 569 const char *buf, size_t count) 570 { 571 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 572 port_attr); 573 struct mlx4_dev *mdev = info->dev; 574 struct mlx4_priv *priv = mlx4_priv(mdev); 575 enum mlx4_port_type types[MLX4_MAX_PORTS]; 576 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 577 int i; 578 int err = 0; 579 580 if (!strcmp(buf, "ib\n")) 581 info->tmp_type = MLX4_PORT_TYPE_IB; 582 else if (!strcmp(buf, "eth\n")) 583 info->tmp_type = MLX4_PORT_TYPE_ETH; 584 else if (!strcmp(buf, "auto\n")) 585 info->tmp_type = MLX4_PORT_TYPE_AUTO; 586 else { 587 mlx4_err(mdev, "%s is not supported port type\n", buf); 588 return -EINVAL; 589 } 590 591 mlx4_stop_sense(mdev); 592 mutex_lock(&priv->port_mutex); 593 /* Possible type is always the one that was delivered */ 594 mdev->caps.possible_type[info->port] = info->tmp_type; 595 596 for (i = 0; i < mdev->caps.num_ports; i++) { 597 types[i] = priv->port[i+1].tmp_type ? 
priv->port[i+1].tmp_type : 598 mdev->caps.possible_type[i+1]; 599 if (types[i] == MLX4_PORT_TYPE_AUTO) 600 types[i] = mdev->caps.port_type[i+1]; 601 } 602 603 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 604 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 605 for (i = 1; i <= mdev->caps.num_ports; i++) { 606 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 607 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 608 err = -EINVAL; 609 } 610 } 611 } 612 if (err) { 613 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 614 "Set only 'eth' or 'ib' for both ports " 615 "(should be the same)\n"); 616 goto out; 617 } 618 619 mlx4_do_sense_ports(mdev, new_types, types); 620 621 err = mlx4_check_port_params(mdev, new_types); 622 if (err) 623 goto out; 624 625 /* We are about to apply the changes after the configuration 626 * was verified, no need to remember the temporary types 627 * any more */ 628 for (i = 0; i < mdev->caps.num_ports; i++) 629 priv->port[i + 1].tmp_type = 0; 630 631 err = mlx4_change_port_types(mdev, new_types); 632 633 out: 634 mlx4_start_sense(mdev); 635 mutex_unlock(&priv->port_mutex); 636 return err ? err : count; 637 } 638 639 enum ibta_mtu { 640 IB_MTU_256 = 1, 641 IB_MTU_512 = 2, 642 IB_MTU_1024 = 3, 643 IB_MTU_2048 = 4, 644 IB_MTU_4096 = 5 645 }; 646 647 static inline int int_to_ibta_mtu(int mtu) 648 { 649 switch (mtu) { 650 case 256: return IB_MTU_256; 651 case 512: return IB_MTU_512; 652 case 1024: return IB_MTU_1024; 653 case 2048: return IB_MTU_2048; 654 case 4096: return IB_MTU_4096; 655 default: return -1; 656 } 657 } 658 659 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 660 { 661 switch (mtu) { 662 case IB_MTU_256: return 256; 663 case IB_MTU_512: return 512; 664 case IB_MTU_1024: return 1024; 665 case IB_MTU_2048: return 2048; 666 case IB_MTU_4096: return 4096; 667 default: return -1; 668 } 669 } 670 671 static ssize_t show_port_ib_mtu(struct device *dev, 672 struct device_attribute *attr, 673 char *buf) 674 { 675 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 676 port_mtu_attr); 677 struct mlx4_dev *mdev = info->dev; 678 679 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 680 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 681 682 sprintf(buf, "%d\n", 683 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 684 return strlen(buf); 685 } 686 687 static ssize_t set_port_ib_mtu(struct device *dev, 688 struct device_attribute *attr, 689 const char *buf, size_t count) 690 { 691 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 692 port_mtu_attr); 693 struct mlx4_dev *mdev = info->dev; 694 struct mlx4_priv *priv = mlx4_priv(mdev); 695 int err, port, mtu, ibta_mtu = -1; 696 697 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 698 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 699 return -EINVAL; 700 } 701 702 err = sscanf(buf, "%d", &mtu); 703 if (err > 0) 704 ibta_mtu = int_to_ibta_mtu(mtu); 705 706 if (err <= 0 || ibta_mtu < 0) { 707 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 708 return -EINVAL; 709 } 710 711 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 712 713 mlx4_stop_sense(mdev); 714 mutex_lock(&priv->port_mutex); 715 mlx4_unregister_device(mdev); 716 for (port = 1; port <= mdev->caps.num_ports; port++) { 717 mlx4_CLOSE_PORT(mdev, port); 718 err = mlx4_SET_PORT(mdev, port); 719 if (err) { 720 mlx4_err(mdev, "Failed to set port %d, " 721 "aborting\n", port); 722 goto err_set_port; 723 } 724 } 725 err = 
mlx4_register_device(mdev); 726 err_set_port: 727 mutex_unlock(&priv->port_mutex); 728 mlx4_start_sense(mdev); 729 return err ? err : count; 730 } 731 732 static int mlx4_load_fw(struct mlx4_dev *dev) 733 { 734 struct mlx4_priv *priv = mlx4_priv(dev); 735 int err; 736 737 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 738 GFP_HIGHUSER | __GFP_NOWARN, 0); 739 if (!priv->fw.fw_icm) { 740 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 741 return -ENOMEM; 742 } 743 744 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 745 if (err) { 746 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 747 goto err_free; 748 } 749 750 err = mlx4_RUN_FW(dev); 751 if (err) { 752 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 753 goto err_unmap_fa; 754 } 755 756 return 0; 757 758 err_unmap_fa: 759 mlx4_UNMAP_FA(dev); 760 761 err_free: 762 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 763 return err; 764 } 765 766 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 767 int cmpt_entry_sz) 768 { 769 struct mlx4_priv *priv = mlx4_priv(dev); 770 int err; 771 int num_eqs; 772 773 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 774 cmpt_base + 775 ((u64) (MLX4_CMPT_TYPE_QP * 776 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 777 cmpt_entry_sz, dev->caps.num_qps, 778 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 779 0, 0); 780 if (err) 781 goto err; 782 783 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 784 cmpt_base + 785 ((u64) (MLX4_CMPT_TYPE_SRQ * 786 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 787 cmpt_entry_sz, dev->caps.num_srqs, 788 dev->caps.reserved_srqs, 0, 0); 789 if (err) 790 goto err_qp; 791 792 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 793 cmpt_base + 794 ((u64) (MLX4_CMPT_TYPE_CQ * 795 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 796 cmpt_entry_sz, dev->caps.num_cqs, 797 dev->caps.reserved_cqs, 0, 0); 798 if (err) 799 goto err_srq; 800 801 num_eqs = (mlx4_is_master(dev)) ? 
dev->phys_caps.num_phys_eqs : 802 dev->caps.num_eqs; 803 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 804 cmpt_base + 805 ((u64) (MLX4_CMPT_TYPE_EQ * 806 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 807 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 808 if (err) 809 goto err_cq; 810 811 return 0; 812 813 err_cq: 814 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 815 816 err_srq: 817 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 818 819 err_qp: 820 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 821 822 err: 823 return err; 824 } 825 826 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 827 struct mlx4_init_hca_param *init_hca, u64 icm_size) 828 { 829 struct mlx4_priv *priv = mlx4_priv(dev); 830 u64 aux_pages; 831 int num_eqs; 832 int err; 833 834 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 835 if (err) { 836 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 837 return err; 838 } 839 840 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 841 (unsigned long long) icm_size >> 10, 842 (unsigned long long) aux_pages << 2); 843 844 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 845 GFP_HIGHUSER | __GFP_NOWARN, 0); 846 if (!priv->fw.aux_icm) { 847 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 848 return -ENOMEM; 849 } 850 851 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 852 if (err) { 853 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 854 goto err_free_aux; 855 } 856 857 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 858 if (err) { 859 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 860 goto err_unmap_aux; 861 } 862 863 864 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : 865 dev->caps.num_eqs; 866 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 867 init_hca->eqc_base, dev_cap->eqc_entry_sz, 868 num_eqs, num_eqs, 0, 0); 869 if (err) { 870 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 871 goto err_unmap_cmpt; 872 } 873 874 /* 875 * Reserved MTT entries must be aligned up to a cacheline 876 * boundary, since the FW will write to them, while the driver 877 * writes to all other MTT entries. 
(The variable 878 * dev->caps.mtt_entry_sz below is really the MTT segment 879 * size, not the raw entry size) 880 */ 881 dev->caps.reserved_mtts = 882 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 883 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 884 885 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 886 init_hca->mtt_base, 887 dev->caps.mtt_entry_sz, 888 dev->caps.num_mtts, 889 dev->caps.reserved_mtts, 1, 0); 890 if (err) { 891 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 892 goto err_unmap_eq; 893 } 894 895 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 896 init_hca->dmpt_base, 897 dev_cap->dmpt_entry_sz, 898 dev->caps.num_mpts, 899 dev->caps.reserved_mrws, 1, 1); 900 if (err) { 901 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 902 goto err_unmap_mtt; 903 } 904 905 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 906 init_hca->qpc_base, 907 dev_cap->qpc_entry_sz, 908 dev->caps.num_qps, 909 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 910 0, 0); 911 if (err) { 912 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 913 goto err_unmap_dmpt; 914 } 915 916 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 917 init_hca->auxc_base, 918 dev_cap->aux_entry_sz, 919 dev->caps.num_qps, 920 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 921 0, 0); 922 if (err) { 923 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 924 goto err_unmap_qp; 925 } 926 927 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 928 init_hca->altc_base, 929 dev_cap->altc_entry_sz, 930 dev->caps.num_qps, 931 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 932 0, 0); 933 if (err) { 934 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 935 goto err_unmap_auxc; 936 } 937 938 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 939 init_hca->rdmarc_base, 940 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 941 dev->caps.num_qps, 942 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 943 0, 0); 944 if (err) { 945 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 946 goto err_unmap_altc; 947 } 948 949 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 950 init_hca->cqc_base, 951 dev_cap->cqc_entry_sz, 952 dev->caps.num_cqs, 953 dev->caps.reserved_cqs, 0, 0); 954 if (err) { 955 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 956 goto err_unmap_rdmarc; 957 } 958 959 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 960 init_hca->srqc_base, 961 dev_cap->srq_entry_sz, 962 dev->caps.num_srqs, 963 dev->caps.reserved_srqs, 0, 0); 964 if (err) { 965 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 966 goto err_unmap_cq; 967 } 968 969 /* 970 * It's not strictly required, but for simplicity just map the 971 * whole multicast group table now. The table isn't very big 972 * and it's a lot easier than trying to track ref counts. 
973 */ 974 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 975 init_hca->mc_base, 976 mlx4_get_mgm_entry_size(dev), 977 dev->caps.num_mgms + dev->caps.num_amgms, 978 dev->caps.num_mgms + dev->caps.num_amgms, 979 0, 0); 980 if (err) { 981 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 982 goto err_unmap_srq; 983 } 984 985 return 0; 986 987 err_unmap_srq: 988 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 989 990 err_unmap_cq: 991 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 992 993 err_unmap_rdmarc: 994 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 995 996 err_unmap_altc: 997 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 998 999 err_unmap_auxc: 1000 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1001 1002 err_unmap_qp: 1003 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1004 1005 err_unmap_dmpt: 1006 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1007 1008 err_unmap_mtt: 1009 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1010 1011 err_unmap_eq: 1012 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1013 1014 err_unmap_cmpt: 1015 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1016 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1017 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1018 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1019 1020 err_unmap_aux: 1021 mlx4_UNMAP_ICM_AUX(dev); 1022 1023 err_free_aux: 1024 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1025 1026 return err; 1027 } 1028 1029 static void mlx4_free_icms(struct mlx4_dev *dev) 1030 { 1031 struct mlx4_priv *priv = mlx4_priv(dev); 1032 1033 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1034 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1035 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1036 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1037 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1038 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1039 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1040 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1041 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1042 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1043 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1044 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1045 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1046 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1047 1048 mlx4_UNMAP_ICM_AUX(dev); 1049 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1050 } 1051 1052 static void mlx4_slave_exit(struct mlx4_dev *dev) 1053 { 1054 struct mlx4_priv *priv = mlx4_priv(dev); 1055 1056 down(&priv->cmd.slave_sem); 1057 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1058 mlx4_warn(dev, "Failed to close slave function.\n"); 1059 up(&priv->cmd.slave_sem); 1060 } 1061 1062 static int map_bf_area(struct mlx4_dev *dev) 1063 { 1064 struct mlx4_priv *priv = mlx4_priv(dev); 1065 resource_size_t bf_start; 1066 resource_size_t bf_len; 1067 int err = 0; 1068 1069 if (!dev->caps.bf_reg_size) 1070 return -ENXIO; 1071 1072 bf_start = pci_resource_start(dev->pdev, 2) + 1073 (dev->caps.num_uars << PAGE_SHIFT); 1074 bf_len = pci_resource_len(dev->pdev, 2) - 1075 (dev->caps.num_uars << PAGE_SHIFT); 1076 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1077 if (!priv->bf_mapping) 1078 err = -ENOMEM; 1079 1080 return err; 1081 } 1082 1083 static void unmap_bf_area(struct mlx4_dev *dev) 1084 { 1085 
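	/*
	 * Undo map_bf_area(): release the write-combining BlueFlame mapping
	 * if one was created; a NULL mapping means BlueFlame was never
	 * mapped and there is nothing to free.
	 */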
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	down(&priv->cmd.slave_sem);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR. retrying... "
					  "(try num:%d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;
	up(&priv->cmd.slave_sem);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	up(&priv->cmd.slave_sem);
	return -EIO;
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			goto unmap_bf;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			goto unmap_bf;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed,
aborting.\n"); 1205 goto err_stop_fw; 1206 } 1207 1208 profile = default_profile; 1209 1210 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 1211 &init_hca); 1212 if ((long long) icm_size < 0) { 1213 err = icm_size; 1214 goto err_stop_fw; 1215 } 1216 1217 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 1218 1219 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 1220 init_hca.uar_page_sz = PAGE_SHIFT - 12; 1221 1222 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1223 if (err) 1224 goto err_stop_fw; 1225 1226 err = mlx4_INIT_HCA(dev, &init_hca); 1227 if (err) { 1228 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1229 goto err_free_icm; 1230 } 1231 } else { 1232 err = mlx4_init_slave(dev); 1233 if (err) { 1234 mlx4_err(dev, "Failed to initialize slave\n"); 1235 goto unmap_bf; 1236 } 1237 1238 err = mlx4_slave_cap(dev); 1239 if (err) { 1240 mlx4_err(dev, "Failed to obtain slave caps\n"); 1241 goto err_close; 1242 } 1243 } 1244 1245 if (map_bf_area(dev)) 1246 mlx4_dbg(dev, "Failed to map blue flame area\n"); 1247 1248 /*Only the master set the ports, all the rest got it from it.*/ 1249 if (!mlx4_is_slave(dev)) 1250 mlx4_set_port_mask(dev); 1251 1252 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1253 if (err) { 1254 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 1255 goto err_close; 1256 } 1257 1258 priv->eq_table.inta_pin = adapter.inta_pin; 1259 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 1260 1261 return 0; 1262 1263 err_close: 1264 mlx4_close_hca(dev); 1265 1266 err_free_icm: 1267 if (!mlx4_is_slave(dev)) 1268 mlx4_free_icms(dev); 1269 1270 err_stop_fw: 1271 if (!mlx4_is_slave(dev)) { 1272 mlx4_UNMAP_FA(dev); 1273 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1274 } 1275 unmap_bf: 1276 unmap_bf_area(dev); 1277 return err; 1278 } 1279 1280 static int mlx4_init_counters_table(struct mlx4_dev *dev) 1281 { 1282 struct mlx4_priv *priv = mlx4_priv(dev); 1283 int nent; 1284 1285 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1286 return -ENOENT; 1287 1288 nent = dev->caps.max_counters; 1289 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 1290 } 1291 1292 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 1293 { 1294 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 1295 } 1296 1297 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1298 { 1299 struct mlx4_priv *priv = mlx4_priv(dev); 1300 1301 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1302 return -ENOENT; 1303 1304 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 1305 if (*idx == -1) 1306 return -ENOMEM; 1307 1308 return 0; 1309 } 1310 1311 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1312 { 1313 u64 out_param; 1314 int err; 1315 1316 if (mlx4_is_mfunc(dev)) { 1317 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 1318 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 1319 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 1320 if (!err) 1321 *idx = get_param_l(&out_param); 1322 1323 return err; 1324 } 1325 return __mlx4_counter_alloc(dev, idx); 1326 } 1327 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 1328 1329 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1330 { 1331 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); 1332 return; 1333 } 1334 1335 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1336 { 1337 u64 in_param; 1338 1339 if (mlx4_is_mfunc(dev)) { 1340 set_param_l(&in_param, idx); 1341 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 1342 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 1343 MLX4_CMD_WRAPPED); 
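		/*
		 * In multi-function mode the wrapped FREE_RES command above
		 * asks the PF to release the counter on this function's
		 * behalf, so the local counter bitmap is not touched here.
		 */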
1344 return; 1345 } 1346 __mlx4_counter_free(dev, idx); 1347 } 1348 EXPORT_SYMBOL_GPL(mlx4_counter_free); 1349 1350 static int mlx4_setup_hca(struct mlx4_dev *dev) 1351 { 1352 struct mlx4_priv *priv = mlx4_priv(dev); 1353 int err; 1354 int port; 1355 __be32 ib_port_default_caps; 1356 1357 err = mlx4_init_uar_table(dev); 1358 if (err) { 1359 mlx4_err(dev, "Failed to initialize " 1360 "user access region table, aborting.\n"); 1361 return err; 1362 } 1363 1364 err = mlx4_uar_alloc(dev, &priv->driver_uar); 1365 if (err) { 1366 mlx4_err(dev, "Failed to allocate driver access region, " 1367 "aborting.\n"); 1368 goto err_uar_table_free; 1369 } 1370 1371 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1372 if (!priv->kar) { 1373 mlx4_err(dev, "Couldn't map kernel access region, " 1374 "aborting.\n"); 1375 err = -ENOMEM; 1376 goto err_uar_free; 1377 } 1378 1379 err = mlx4_init_pd_table(dev); 1380 if (err) { 1381 mlx4_err(dev, "Failed to initialize " 1382 "protection domain table, aborting.\n"); 1383 goto err_kar_unmap; 1384 } 1385 1386 err = mlx4_init_xrcd_table(dev); 1387 if (err) { 1388 mlx4_err(dev, "Failed to initialize " 1389 "reliable connection domain table, aborting.\n"); 1390 goto err_pd_table_free; 1391 } 1392 1393 err = mlx4_init_mr_table(dev); 1394 if (err) { 1395 mlx4_err(dev, "Failed to initialize " 1396 "memory region table, aborting.\n"); 1397 goto err_xrcd_table_free; 1398 } 1399 1400 err = mlx4_init_eq_table(dev); 1401 if (err) { 1402 mlx4_err(dev, "Failed to initialize " 1403 "event queue table, aborting.\n"); 1404 goto err_mr_table_free; 1405 } 1406 1407 err = mlx4_cmd_use_events(dev); 1408 if (err) { 1409 mlx4_err(dev, "Failed to switch to event-driven " 1410 "firmware commands, aborting.\n"); 1411 goto err_eq_table_free; 1412 } 1413 1414 err = mlx4_NOP(dev); 1415 if (err) { 1416 if (dev->flags & MLX4_FLAG_MSI_X) { 1417 mlx4_warn(dev, "NOP command failed to generate MSI-X " 1418 "interrupt IRQ %d).\n", 1419 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1420 mlx4_warn(dev, "Trying again without MSI-X.\n"); 1421 } else { 1422 mlx4_err(dev, "NOP command failed to generate interrupt " 1423 "(IRQ %d), aborting.\n", 1424 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1425 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 1426 } 1427 1428 goto err_cmd_poll; 1429 } 1430 1431 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 1432 1433 err = mlx4_init_cq_table(dev); 1434 if (err) { 1435 mlx4_err(dev, "Failed to initialize " 1436 "completion queue table, aborting.\n"); 1437 goto err_cmd_poll; 1438 } 1439 1440 err = mlx4_init_srq_table(dev); 1441 if (err) { 1442 mlx4_err(dev, "Failed to initialize " 1443 "shared receive queue table, aborting.\n"); 1444 goto err_cq_table_free; 1445 } 1446 1447 err = mlx4_init_qp_table(dev); 1448 if (err) { 1449 mlx4_err(dev, "Failed to initialize " 1450 "queue pair table, aborting.\n"); 1451 goto err_srq_table_free; 1452 } 1453 1454 if (!mlx4_is_slave(dev)) { 1455 err = mlx4_init_mcg_table(dev); 1456 if (err) { 1457 mlx4_err(dev, "Failed to initialize " 1458 "multicast group table, aborting.\n"); 1459 goto err_qp_table_free; 1460 } 1461 } 1462 1463 err = mlx4_init_counters_table(dev); 1464 if (err && err != -ENOENT) { 1465 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1466 goto err_mcg_table_free; 1467 } 1468 1469 if (!mlx4_is_slave(dev)) { 1470 for (port = 1; port <= dev->caps.num_ports; port++) { 1471 ib_port_default_caps = 0; 1472 err = mlx4_get_port_ib_caps(dev, port, 1473 
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
		/* In multifunction mode each function gets 2 MSI-X vectors:
		 * one for data path completions and the other for async events
		 * or command completions */
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs are shared */
			dev->caps.comp_pool = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
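		/*
		 * Only the PF manages the per-port MAC/VLAN tables and the
		 * base QPN of the reserved Ethernet QP range; slave functions
		 * skip this initialization.
		 */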
INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL); 1615 mlx4_init_mac_table(dev, &info->mac_table); 1616 mlx4_init_vlan_table(dev, &info->vlan_table); 1617 info->base_qpn = 1618 dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + 1619 (port - 1) * (1 << log_num_mac); 1620 } 1621 1622 sprintf(info->dev_name, "mlx4_port%d", port); 1623 info->port_attr.attr.name = info->dev_name; 1624 if (mlx4_is_mfunc(dev)) 1625 info->port_attr.attr.mode = S_IRUGO; 1626 else { 1627 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1628 info->port_attr.store = set_port_type; 1629 } 1630 info->port_attr.show = show_port_type; 1631 sysfs_attr_init(&info->port_attr.attr); 1632 1633 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1634 if (err) { 1635 mlx4_err(dev, "Failed to create file for port %d\n", port); 1636 info->port = -1; 1637 } 1638 1639 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 1640 info->port_mtu_attr.attr.name = info->dev_mtu_name; 1641 if (mlx4_is_mfunc(dev)) 1642 info->port_mtu_attr.attr.mode = S_IRUGO; 1643 else { 1644 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 1645 info->port_mtu_attr.store = set_port_ib_mtu; 1646 } 1647 info->port_mtu_attr.show = show_port_ib_mtu; 1648 sysfs_attr_init(&info->port_mtu_attr.attr); 1649 1650 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); 1651 if (err) { 1652 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 1653 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1654 info->port = -1; 1655 } 1656 1657 return err; 1658 } 1659 1660 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 1661 { 1662 if (info->port < 0) 1663 return; 1664 1665 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1666 device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); 1667 } 1668 1669 static int mlx4_init_steering(struct mlx4_dev *dev) 1670 { 1671 struct mlx4_priv *priv = mlx4_priv(dev); 1672 int num_entries = dev->caps.num_ports; 1673 int i, j; 1674 1675 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 1676 if (!priv->steer) 1677 return -ENOMEM; 1678 1679 for (i = 0; i < num_entries; i++) 1680 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1681 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 1682 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 1683 } 1684 return 0; 1685 } 1686 1687 static void mlx4_clear_steering(struct mlx4_dev *dev) 1688 { 1689 struct mlx4_priv *priv = mlx4_priv(dev); 1690 struct mlx4_steer_index *entry, *tmp_entry; 1691 struct mlx4_promisc_qp *pqp, *tmp_pqp; 1692 int num_entries = dev->caps.num_ports; 1693 int i, j; 1694 1695 for (i = 0; i < num_entries; i++) { 1696 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1697 list_for_each_entry_safe(pqp, tmp_pqp, 1698 &priv->steer[i].promisc_qps[j], 1699 list) { 1700 list_del(&pqp->list); 1701 kfree(pqp); 1702 } 1703 list_for_each_entry_safe(entry, tmp_entry, 1704 &priv->steer[i].steer_entries[j], 1705 list) { 1706 list_del(&entry->list); 1707 list_for_each_entry_safe(pqp, tmp_pqp, 1708 &entry->duplicates, 1709 list) { 1710 list_del(&pqp->list); 1711 kfree(pqp); 1712 } 1713 kfree(entry); 1714 } 1715 } 1716 } 1717 kfree(priv->steer); 1718 } 1719 1720 static int extended_func_num(struct pci_dev *pdev) 1721 { 1722 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 1723 } 1724 1725 #define MLX4_OWNER_BASE 0x8069c 1726 #define MLX4_OWNER_SIZE 4 1727 1728 static int mlx4_get_ownership(struct mlx4_dev *dev) 1729 { 1730 void __iomem *owner; 1731 u32 ret; 1732 1733 owner = ioremap(pci_resource_start(dev->pdev, 0) + 
MLX4_OWNER_BASE, 1734 MLX4_OWNER_SIZE); 1735 if (!owner) { 1736 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1737 return -ENOMEM; 1738 } 1739 1740 ret = readl(owner); 1741 iounmap(owner); 1742 return (int) !!ret; 1743 } 1744 1745 static void mlx4_free_ownership(struct mlx4_dev *dev) 1746 { 1747 void __iomem *owner; 1748 1749 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 1750 MLX4_OWNER_SIZE); 1751 if (!owner) { 1752 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1753 return; 1754 } 1755 writel(0, owner); 1756 msleep(1000); 1757 iounmap(owner); 1758 } 1759 1760 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1761 { 1762 struct mlx4_priv *priv; 1763 struct mlx4_dev *dev; 1764 int err; 1765 int port; 1766 1767 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 1768 1769 err = pci_enable_device(pdev); 1770 if (err) { 1771 dev_err(&pdev->dev, "Cannot enable PCI device, " 1772 "aborting.\n"); 1773 return err; 1774 } 1775 if (num_vfs > MLX4_MAX_NUM_VF) { 1776 printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", 1777 num_vfs, MLX4_MAX_NUM_VF); 1778 return -EINVAL; 1779 } 1780 /* 1781 * Check for BARs. 1782 */ 1783 if (((id == NULL) || !(id->driver_data & MLX4_VF)) && 1784 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1785 dev_err(&pdev->dev, "Missing DCS, aborting." 1786 "(id == 0X%p, id->driver_data: 0x%lx," 1787 " pci_resource_flags(pdev, 0):0x%lx)\n", id, 1788 id ? id->driver_data : 0, pci_resource_flags(pdev, 0)); 1789 err = -ENODEV; 1790 goto err_disable_pdev; 1791 } 1792 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 1793 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 1794 err = -ENODEV; 1795 goto err_disable_pdev; 1796 } 1797 1798 err = pci_request_regions(pdev, DRV_NAME); 1799 if (err) { 1800 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 1801 goto err_disable_pdev; 1802 } 1803 1804 pci_set_master(pdev); 1805 1806 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1807 if (err) { 1808 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1809 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1810 if (err) { 1811 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1812 goto err_release_regions; 1813 } 1814 } 1815 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1816 if (err) { 1817 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1818 "consistent PCI DMA mask.\n"); 1819 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1820 if (err) { 1821 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1822 "aborting.\n"); 1823 goto err_release_regions; 1824 } 1825 } 1826 1827 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 1828 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 1829 1830 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1831 if (!priv) { 1832 dev_err(&pdev->dev, "Device struct alloc failed, " 1833 "aborting.\n"); 1834 err = -ENOMEM; 1835 goto err_release_regions; 1836 } 1837 1838 dev = &priv->dev; 1839 dev->pdev = pdev; 1840 INIT_LIST_HEAD(&priv->ctx_list); 1841 spin_lock_init(&priv->ctx_lock); 1842 1843 mutex_init(&priv->port_mutex); 1844 1845 INIT_LIST_HEAD(&priv->pgdir_list); 1846 mutex_init(&priv->pgdir_mutex); 1847 1848 INIT_LIST_HEAD(&priv->bf_list); 1849 mutex_init(&priv->bf_mutex); 1850 1851 dev->rev_id = pdev->revision; 1852 /* Detect if this device is a virtual function */ 1853 if (id && id->driver_data & MLX4_VF) { 1854 /* When acting as pf, we normally skip vfs unless explicitly 1855 * requested to probe 
	 * them. */
		if (num_vfs && extended_func_num(pdev) > probe_vf) {
			mlx4_warn(dev, "Skipping virtual function:%d\n",
				  extended_func_num(pdev));
			err = -ENODEV;
			goto err_free_dev;
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices. Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported."
					  " Skipping PF.\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}

		if (num_vfs) {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
			err = pci_enable_sriov(pdev, num_vfs);
			if (err) {
				mlx4_err(dev, "Failed to enable SR-IOV, "
					 "continuing without SR-IOV enabled"
					 " (err = %d).\n", err);
				err = 0;
			} else {
				mlx4_warn(dev, "Running in master mode\n");
				dev->flags |= MLX4_FLAG_SRIOV |
					      MLX4_FLAG_MASTER;
				dev->num_vfs = num_vfs;
			}
		}

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			goto err_rel_own;
		}
	}

slave_start:
	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			if (mlx4_multi_func_init(dev)) {
				mlx4_err(dev, "Failed to init slave mfunc"
					 " interface, aborting.\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not the primary physical function;
			 * running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from FW */
	if (mlx4_is_master(dev)) {
		if (mlx4_multi_func_init(dev)) {
			mlx4_err(dev, "Failed to init master mfunc "
				 "interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		mlx4_err(dev, "INTx is not supported in multi-function mode,"
1964 " aborting.\n"); 1965 goto err_free_eq; 1966 } 1967 1968 if (!mlx4_is_slave(dev)) { 1969 err = mlx4_init_steering(dev); 1970 if (err) 1971 goto err_free_eq; 1972 } 1973 1974 err = mlx4_setup_hca(dev); 1975 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 1976 !mlx4_is_mfunc(dev)) { 1977 dev->flags &= ~MLX4_FLAG_MSI_X; 1978 pci_disable_msix(pdev); 1979 err = mlx4_setup_hca(dev); 1980 } 1981 1982 if (err) 1983 goto err_steer; 1984 1985 for (port = 1; port <= dev->caps.num_ports; port++) { 1986 err = mlx4_init_port_info(dev, port); 1987 if (err) 1988 goto err_port; 1989 } 1990 1991 err = mlx4_register_device(dev); 1992 if (err) 1993 goto err_port; 1994 1995 mlx4_sense_init(dev); 1996 mlx4_start_sense(dev); 1997 1998 pci_set_drvdata(pdev, dev); 1999 2000 return 0; 2001 2002 err_port: 2003 for (--port; port >= 1; --port) 2004 mlx4_cleanup_port_info(&priv->port[port]); 2005 2006 mlx4_cleanup_counters_table(dev); 2007 mlx4_cleanup_mcg_table(dev); 2008 mlx4_cleanup_qp_table(dev); 2009 mlx4_cleanup_srq_table(dev); 2010 mlx4_cleanup_cq_table(dev); 2011 mlx4_cmd_use_polling(dev); 2012 mlx4_cleanup_eq_table(dev); 2013 mlx4_cleanup_mr_table(dev); 2014 mlx4_cleanup_xrcd_table(dev); 2015 mlx4_cleanup_pd_table(dev); 2016 mlx4_cleanup_uar_table(dev); 2017 2018 err_steer: 2019 if (!mlx4_is_slave(dev)) 2020 mlx4_clear_steering(dev); 2021 2022 err_free_eq: 2023 mlx4_free_eq_table(dev); 2024 2025 err_master_mfunc: 2026 if (mlx4_is_master(dev)) 2027 mlx4_multi_func_cleanup(dev); 2028 2029 err_close: 2030 if (dev->flags & MLX4_FLAG_MSI_X) 2031 pci_disable_msix(pdev); 2032 2033 mlx4_close_hca(dev); 2034 2035 err_mfunc: 2036 if (mlx4_is_slave(dev)) 2037 mlx4_multi_func_cleanup(dev); 2038 2039 err_cmd: 2040 mlx4_cmd_cleanup(dev); 2041 2042 err_sriov: 2043 if (dev->flags & MLX4_FLAG_SRIOV) 2044 pci_disable_sriov(pdev); 2045 2046 err_rel_own: 2047 if (!mlx4_is_slave(dev)) 2048 mlx4_free_ownership(dev); 2049 2050 err_free_dev: 2051 kfree(priv); 2052 2053 err_release_regions: 2054 pci_release_regions(pdev); 2055 2056 err_disable_pdev: 2057 pci_disable_device(pdev); 2058 pci_set_drvdata(pdev, NULL); 2059 return err; 2060 } 2061 2062 static int __devinit mlx4_init_one(struct pci_dev *pdev, 2063 const struct pci_device_id *id) 2064 { 2065 printk_once(KERN_INFO "%s", mlx4_version); 2066 2067 return __mlx4_init_one(pdev, id); 2068 } 2069 2070 static void mlx4_remove_one(struct pci_dev *pdev) 2071 { 2072 struct mlx4_dev *dev = pci_get_drvdata(pdev); 2073 struct mlx4_priv *priv = mlx4_priv(dev); 2074 int p; 2075 2076 if (dev) { 2077 /* in SRIOV it is not allowed to unload the pf's 2078 * driver while there are alive vf's */ 2079 if (mlx4_is_master(dev)) { 2080 if (mlx4_how_many_lives_vf(dev)) 2081 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); 2082 } 2083 mlx4_stop_sense(dev); 2084 mlx4_unregister_device(dev); 2085 2086 for (p = 1; p <= dev->caps.num_ports; p++) { 2087 mlx4_cleanup_port_info(&priv->port[p]); 2088 mlx4_CLOSE_PORT(dev, p); 2089 } 2090 2091 if (mlx4_is_master(dev)) 2092 mlx4_free_resource_tracker(dev, 2093 RES_TR_FREE_SLAVES_ONLY); 2094 2095 mlx4_cleanup_counters_table(dev); 2096 mlx4_cleanup_mcg_table(dev); 2097 mlx4_cleanup_qp_table(dev); 2098 mlx4_cleanup_srq_table(dev); 2099 mlx4_cleanup_cq_table(dev); 2100 mlx4_cmd_use_polling(dev); 2101 mlx4_cleanup_eq_table(dev); 2102 mlx4_cleanup_mr_table(dev); 2103 mlx4_cleanup_xrcd_table(dev); 2104 mlx4_cleanup_pd_table(dev); 2105 2106 if (mlx4_is_master(dev)) 2107 mlx4_free_resource_tracker(dev, 2108 RES_TR_FREE_STRUCTS_ONLY); 2109 2110 
		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		if (!mlx4_is_slave(dev))
			mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		if (mlx4_is_master(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_close_hca(dev);
		if (mlx4_is_slave(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (dev->flags & MLX4_FLAG_SRIOV) {
			mlx4_warn(dev, "Disabling sriov\n");
			pci_disable_sriov(pdev);
		}

		if (!mlx4_is_slave(dev))
			mlx4_free_ownership(dev);
		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

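/*
 * Validate module parameters at load time.  An out-of-range value fails
 * module init with -EINVAL; the obsolete log_num_vlan parameter and an
 * unsupported ETH/IB port combination only trigger a warning.
 */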
pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 2209 MLX4_LOG_NUM_VLANS); 2210 2211 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 2212 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 2213 return -1; 2214 } 2215 2216 /* Check if module param for ports type has legal combination */ 2217 if (port_type_array[0] == false && port_type_array[1] == true) { 2218 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 2219 port_type_array[0] = true; 2220 } 2221 2222 return 0; 2223 } 2224 2225 static int __init mlx4_init(void) 2226 { 2227 int ret; 2228 2229 if (mlx4_verify_params()) 2230 return -EINVAL; 2231 2232 mlx4_catas_init(); 2233 2234 mlx4_wq = create_singlethread_workqueue("mlx4"); 2235 if (!mlx4_wq) 2236 return -ENOMEM; 2237 2238 ret = pci_register_driver(&mlx4_driver); 2239 return ret < 0 ? ret : 0; 2240 } 2241 2242 static void __exit mlx4_cleanup(void) 2243 { 2244 pci_unregister_driver(&mlx4_driver); 2245 destroy_workqueue(mlx4_wq); 2246 } 2247 2248 module_init(mlx4_init); 2249 module_exit(mlx4_cleanup); 2250