1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 #include <linux/netdevice.h> 45 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/doorbell.h> 48 49 #include "mlx4.h" 50 #include "fw.h" 51 #include "icm.h" 52 53 MODULE_AUTHOR("Roland Dreier"); 54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 55 MODULE_LICENSE("Dual BSD/GPL"); 56 MODULE_VERSION(DRV_VERSION); 57 58 struct workqueue_struct *mlx4_wq; 59 60 #ifdef CONFIG_MLX4_DEBUG 61 62 int mlx4_debug_level = 0; 63 module_param_named(debug_level, mlx4_debug_level, int, 0644); 64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 65 66 #endif /* CONFIG_MLX4_DEBUG */ 67 68 #ifdef CONFIG_PCI_MSI 69 70 static int msi_x = 1; 71 module_param(msi_x, int, 0444); 72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 73 74 #else /* CONFIG_PCI_MSI */ 75 76 #define msi_x (0) 77 78 #endif /* CONFIG_PCI_MSI */ 79 80 static int num_vfs; 81 module_param(num_vfs, int, 0444); 82 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); 83 84 static int probe_vf; 85 module_param(probe_vf, int, 0644); 86 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 87 88 int mlx4_log_num_mgm_entry_size = 10; 89 module_param_named(log_num_mgm_entry_size, 90 mlx4_log_num_mgm_entry_size, int, 0444); 91 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 92 " of qp per mcg, for example:" 93 " 10 gives 248.range: 9<=" 94 " log_num_mgm_entry_size <= 12." 
					 " Not in use with device managed"
					 " flow steering");

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
		/* set gid and pkey table operating lengths by default
		 * to non-sriov values */
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i] = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i] = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i] = dev_cap->wavelength[i];
		dev->caps.trans_code[i] = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
		dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
		dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		} else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
					  "set to use B0 steering. Falling back to A0 steering mode.\n");
		}
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));

	/* Sense port is always allowed on supported ConnectX-1 and ConnectX-2 devices */
	if (dev->pdev->device != 0x1003)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform a sense_port FW command to try and set the correct
		 * port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too large "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too large "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ?
MLX4_MAX_NUM_SLAVES : 0; 386 return 0; 387 } 388 /*The function checks if there are live vf, return the num of them*/ 389 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 390 { 391 struct mlx4_priv *priv = mlx4_priv(dev); 392 struct mlx4_slave_state *s_state; 393 int i; 394 int ret = 0; 395 396 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 397 s_state = &priv->mfunc.master.slave_state[i]; 398 if (s_state->active && s_state->last_cmd != 399 MLX4_COMM_CMD_RESET) { 400 mlx4_warn(dev, "%s: slave: %d is still active\n", 401 __func__, i); 402 ret++; 403 } 404 } 405 return ret; 406 } 407 408 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 409 { 410 u32 qk = MLX4_RESERVED_QKEY_BASE; 411 412 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 413 qpn < dev->phys_caps.base_proxy_sqpn) 414 return -EINVAL; 415 416 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 417 /* tunnel qp */ 418 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 419 else 420 qk += qpn - dev->phys_caps.base_proxy_sqpn; 421 *qkey = qk; 422 return 0; 423 } 424 EXPORT_SYMBOL(mlx4_get_parav_qkey); 425 426 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 427 { 428 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 429 430 if (!mlx4_is_master(dev)) 431 return; 432 433 priv->virt2phys_pkey[slave][port - 1][i] = val; 434 } 435 EXPORT_SYMBOL(mlx4_sync_pkey_table); 436 437 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 438 { 439 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 440 441 if (!mlx4_is_master(dev)) 442 return; 443 444 priv->slave_node_guids[slave] = guid; 445 } 446 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 447 448 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 449 { 450 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 451 452 if (!mlx4_is_master(dev)) 453 return 0; 454 455 return priv->slave_node_guids[slave]; 456 } 457 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 458 459 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 460 { 461 struct mlx4_priv *priv = mlx4_priv(dev); 462 struct mlx4_slave_state *s_slave; 463 464 if (!mlx4_is_master(dev)) 465 return 0; 466 467 s_slave = &priv->mfunc.master.slave_state[slave]; 468 return !!s_slave->active; 469 } 470 EXPORT_SYMBOL(mlx4_is_slave_active); 471 472 static int mlx4_slave_cap(struct mlx4_dev *dev) 473 { 474 int err; 475 u32 page_size; 476 struct mlx4_dev_cap dev_cap; 477 struct mlx4_func_cap func_cap; 478 struct mlx4_init_hca_param hca_param; 479 int i; 480 481 memset(&hca_param, 0, sizeof(hca_param)); 482 err = mlx4_QUERY_HCA(dev, &hca_param); 483 if (err) { 484 mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); 485 return err; 486 } 487 488 /*fail if the hca has an unknown capability */ 489 if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != 490 HCA_GLOBAL_CAP_MASK) { 491 mlx4_err(dev, "Unknown hca global capabilities\n"); 492 return -ENOSYS; 493 } 494 495 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 496 497 memset(&dev_cap, 0, sizeof(dev_cap)); 498 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 499 err = mlx4_dev_cap(dev, &dev_cap); 500 if (err) { 501 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 502 return err; 503 } 504 505 err = mlx4_QUERY_FW(dev); 506 if (err) 507 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); 508 509 page_size = ~dev->caps.page_size_cap + 1; 510 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 511 if 
(page_size > PAGE_SIZE) { 512 mlx4_err(dev, "HCA minimum page size of %d bigger than " 513 "kernel PAGE_SIZE of %ld, aborting.\n", 514 page_size, PAGE_SIZE); 515 return -ENODEV; 516 } 517 518 /* slave gets uar page size from QUERY_HCA fw command */ 519 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 520 521 /* TODO: relax this assumption */ 522 if (dev->caps.uar_page_size != PAGE_SIZE) { 523 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 524 dev->caps.uar_page_size, PAGE_SIZE); 525 return -ENODEV; 526 } 527 528 memset(&func_cap, 0, sizeof(func_cap)); 529 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 530 if (err) { 531 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", 532 err); 533 return err; 534 } 535 536 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 537 PF_CONTEXT_BEHAVIOUR_MASK) { 538 mlx4_err(dev, "Unknown pf context behaviour\n"); 539 return -ENOSYS; 540 } 541 542 dev->caps.num_ports = func_cap.num_ports; 543 dev->caps.num_qps = func_cap.qp_quota; 544 dev->caps.num_srqs = func_cap.srq_quota; 545 dev->caps.num_cqs = func_cap.cq_quota; 546 dev->caps.num_eqs = func_cap.max_eq; 547 dev->caps.reserved_eqs = func_cap.reserved_eq; 548 dev->caps.num_mpts = func_cap.mpt_quota; 549 dev->caps.num_mtts = func_cap.mtt_quota; 550 dev->caps.num_pds = MLX4_NUM_PDS; 551 dev->caps.num_mgms = 0; 552 dev->caps.num_amgms = 0; 553 554 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 555 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 556 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 557 return -ENODEV; 558 } 559 560 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 561 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 562 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 563 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 564 565 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 566 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { 567 err = -ENOMEM; 568 goto err_mem; 569 } 570 571 for (i = 1; i <= dev->caps.num_ports; ++i) { 572 err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); 573 if (err) { 574 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" 575 " port %d, aborting (%d).\n", i, err); 576 goto err_mem; 577 } 578 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 579 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 580 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 581 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 582 dev->caps.port_mask[i] = dev->caps.port_type[i]; 583 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 584 &dev->caps.gid_table_len[i], 585 &dev->caps.pkey_table_len[i])) 586 goto err_mem; 587 } 588 589 if (dev->caps.uar_page_size * (dev->caps.num_uars - 590 dev->caps.reserved_uars) > 591 pci_resource_len(dev->pdev, 2)) { 592 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " 593 "PCI resource 2 size of 0x%llx, aborting.\n", 594 dev->caps.uar_page_size * dev->caps.num_uars, 595 (unsigned long long) pci_resource_len(dev->pdev, 2)); 596 goto err_mem; 597 } 598 599 return 0; 600 601 err_mem: 602 kfree(dev->caps.qp0_tunnel); 603 kfree(dev->caps.qp0_proxy); 604 kfree(dev->caps.qp1_tunnel); 605 kfree(dev->caps.qp1_proxy); 606 dev->caps.qp0_tunnel = dev->caps.qp0_proxy = 607 dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; 608 609 return err; 610 } 611 612 /* 613 * Change the port configuration of the device. 
614 * Every user of this function must hold the port mutex. 615 */ 616 int mlx4_change_port_types(struct mlx4_dev *dev, 617 enum mlx4_port_type *port_types) 618 { 619 int err = 0; 620 int change = 0; 621 int port; 622 623 for (port = 0; port < dev->caps.num_ports; port++) { 624 /* Change the port type only if the new type is different 625 * from the current, and not set to Auto */ 626 if (port_types[port] != dev->caps.port_type[port + 1]) 627 change = 1; 628 } 629 if (change) { 630 mlx4_unregister_device(dev); 631 for (port = 1; port <= dev->caps.num_ports; port++) { 632 mlx4_CLOSE_PORT(dev, port); 633 dev->caps.port_type[port] = port_types[port - 1]; 634 err = mlx4_SET_PORT(dev, port, -1); 635 if (err) { 636 mlx4_err(dev, "Failed to set port %d, " 637 "aborting\n", port); 638 goto out; 639 } 640 } 641 mlx4_set_port_mask(dev); 642 err = mlx4_register_device(dev); 643 } 644 645 out: 646 return err; 647 } 648 649 static ssize_t show_port_type(struct device *dev, 650 struct device_attribute *attr, 651 char *buf) 652 { 653 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 654 port_attr); 655 struct mlx4_dev *mdev = info->dev; 656 char type[8]; 657 658 sprintf(type, "%s", 659 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 660 "ib" : "eth"); 661 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 662 sprintf(buf, "auto (%s)\n", type); 663 else 664 sprintf(buf, "%s\n", type); 665 666 return strlen(buf); 667 } 668 669 static ssize_t set_port_type(struct device *dev, 670 struct device_attribute *attr, 671 const char *buf, size_t count) 672 { 673 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 674 port_attr); 675 struct mlx4_dev *mdev = info->dev; 676 struct mlx4_priv *priv = mlx4_priv(mdev); 677 enum mlx4_port_type types[MLX4_MAX_PORTS]; 678 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 679 int i; 680 int err = 0; 681 682 if (!strcmp(buf, "ib\n")) 683 info->tmp_type = MLX4_PORT_TYPE_IB; 684 else if (!strcmp(buf, "eth\n")) 685 info->tmp_type = MLX4_PORT_TYPE_ETH; 686 else if (!strcmp(buf, "auto\n")) 687 info->tmp_type = MLX4_PORT_TYPE_AUTO; 688 else { 689 mlx4_err(mdev, "%s is not supported port type\n", buf); 690 return -EINVAL; 691 } 692 693 mlx4_stop_sense(mdev); 694 mutex_lock(&priv->port_mutex); 695 /* Possible type is always the one that was delivered */ 696 mdev->caps.possible_type[info->port] = info->tmp_type; 697 698 for (i = 0; i < mdev->caps.num_ports; i++) { 699 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 700 mdev->caps.possible_type[i+1]; 701 if (types[i] == MLX4_PORT_TYPE_AUTO) 702 types[i] = mdev->caps.port_type[i+1]; 703 } 704 705 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 706 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 707 for (i = 1; i <= mdev->caps.num_ports; i++) { 708 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 709 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 710 err = -EINVAL; 711 } 712 } 713 } 714 if (err) { 715 mlx4_err(mdev, "Auto sensing is not supported on this HCA. 
" 716 "Set only 'eth' or 'ib' for both ports " 717 "(should be the same)\n"); 718 goto out; 719 } 720 721 mlx4_do_sense_ports(mdev, new_types, types); 722 723 err = mlx4_check_port_params(mdev, new_types); 724 if (err) 725 goto out; 726 727 /* We are about to apply the changes after the configuration 728 * was verified, no need to remember the temporary types 729 * any more */ 730 for (i = 0; i < mdev->caps.num_ports; i++) 731 priv->port[i + 1].tmp_type = 0; 732 733 err = mlx4_change_port_types(mdev, new_types); 734 735 out: 736 mlx4_start_sense(mdev); 737 mutex_unlock(&priv->port_mutex); 738 return err ? err : count; 739 } 740 741 enum ibta_mtu { 742 IB_MTU_256 = 1, 743 IB_MTU_512 = 2, 744 IB_MTU_1024 = 3, 745 IB_MTU_2048 = 4, 746 IB_MTU_4096 = 5 747 }; 748 749 static inline int int_to_ibta_mtu(int mtu) 750 { 751 switch (mtu) { 752 case 256: return IB_MTU_256; 753 case 512: return IB_MTU_512; 754 case 1024: return IB_MTU_1024; 755 case 2048: return IB_MTU_2048; 756 case 4096: return IB_MTU_4096; 757 default: return -1; 758 } 759 } 760 761 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 762 { 763 switch (mtu) { 764 case IB_MTU_256: return 256; 765 case IB_MTU_512: return 512; 766 case IB_MTU_1024: return 1024; 767 case IB_MTU_2048: return 2048; 768 case IB_MTU_4096: return 4096; 769 default: return -1; 770 } 771 } 772 773 static ssize_t show_port_ib_mtu(struct device *dev, 774 struct device_attribute *attr, 775 char *buf) 776 { 777 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 778 port_mtu_attr); 779 struct mlx4_dev *mdev = info->dev; 780 781 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 782 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 783 784 sprintf(buf, "%d\n", 785 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 786 return strlen(buf); 787 } 788 789 static ssize_t set_port_ib_mtu(struct device *dev, 790 struct device_attribute *attr, 791 const char *buf, size_t count) 792 { 793 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 794 port_mtu_attr); 795 struct mlx4_dev *mdev = info->dev; 796 struct mlx4_priv *priv = mlx4_priv(mdev); 797 int err, port, mtu, ibta_mtu = -1; 798 799 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 800 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 801 return -EINVAL; 802 } 803 804 err = sscanf(buf, "%d", &mtu); 805 if (err > 0) 806 ibta_mtu = int_to_ibta_mtu(mtu); 807 808 if (err <= 0 || ibta_mtu < 0) { 809 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 810 return -EINVAL; 811 } 812 813 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 814 815 mlx4_stop_sense(mdev); 816 mutex_lock(&priv->port_mutex); 817 mlx4_unregister_device(mdev); 818 for (port = 1; port <= mdev->caps.num_ports; port++) { 819 mlx4_CLOSE_PORT(mdev, port); 820 err = mlx4_SET_PORT(mdev, port, -1); 821 if (err) { 822 mlx4_err(mdev, "Failed to set port %d, " 823 "aborting\n", port); 824 goto err_set_port; 825 } 826 } 827 err = mlx4_register_device(mdev); 828 err_set_port: 829 mutex_unlock(&priv->port_mutex); 830 mlx4_start_sense(mdev); 831 return err ? 
err : count; 832 } 833 834 static int mlx4_load_fw(struct mlx4_dev *dev) 835 { 836 struct mlx4_priv *priv = mlx4_priv(dev); 837 int err; 838 839 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 840 GFP_HIGHUSER | __GFP_NOWARN, 0); 841 if (!priv->fw.fw_icm) { 842 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 843 return -ENOMEM; 844 } 845 846 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 847 if (err) { 848 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 849 goto err_free; 850 } 851 852 err = mlx4_RUN_FW(dev); 853 if (err) { 854 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 855 goto err_unmap_fa; 856 } 857 858 return 0; 859 860 err_unmap_fa: 861 mlx4_UNMAP_FA(dev); 862 863 err_free: 864 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 865 return err; 866 } 867 868 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 869 int cmpt_entry_sz) 870 { 871 struct mlx4_priv *priv = mlx4_priv(dev); 872 int err; 873 int num_eqs; 874 875 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 876 cmpt_base + 877 ((u64) (MLX4_CMPT_TYPE_QP * 878 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 879 cmpt_entry_sz, dev->caps.num_qps, 880 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 881 0, 0); 882 if (err) 883 goto err; 884 885 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 886 cmpt_base + 887 ((u64) (MLX4_CMPT_TYPE_SRQ * 888 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 889 cmpt_entry_sz, dev->caps.num_srqs, 890 dev->caps.reserved_srqs, 0, 0); 891 if (err) 892 goto err_qp; 893 894 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 895 cmpt_base + 896 ((u64) (MLX4_CMPT_TYPE_CQ * 897 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 898 cmpt_entry_sz, dev->caps.num_cqs, 899 dev->caps.reserved_cqs, 0, 0); 900 if (err) 901 goto err_srq; 902 903 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : 904 dev->caps.num_eqs; 905 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 906 cmpt_base + 907 ((u64) (MLX4_CMPT_TYPE_EQ * 908 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 909 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 910 if (err) 911 goto err_cq; 912 913 return 0; 914 915 err_cq: 916 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 917 918 err_srq: 919 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 920 921 err_qp: 922 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 923 924 err: 925 return err; 926 } 927 928 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 929 struct mlx4_init_hca_param *init_hca, u64 icm_size) 930 { 931 struct mlx4_priv *priv = mlx4_priv(dev); 932 u64 aux_pages; 933 int num_eqs; 934 int err; 935 936 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 937 if (err) { 938 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 939 return err; 940 } 941 942 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 943 (unsigned long long) icm_size >> 10, 944 (unsigned long long) aux_pages << 2); 945 946 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 947 GFP_HIGHUSER | __GFP_NOWARN, 0); 948 if (!priv->fw.aux_icm) { 949 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 950 return -ENOMEM; 951 } 952 953 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 954 if (err) { 955 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 956 goto err_free_aux; 957 } 958 959 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 960 if (err) { 961 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 962 goto err_unmap_aux; 963 } 964 965 966 num_eqs = (mlx4_is_master(dev)) ? 
dev->phys_caps.num_phys_eqs : 967 dev->caps.num_eqs; 968 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 969 init_hca->eqc_base, dev_cap->eqc_entry_sz, 970 num_eqs, num_eqs, 0, 0); 971 if (err) { 972 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 973 goto err_unmap_cmpt; 974 } 975 976 /* 977 * Reserved MTT entries must be aligned up to a cacheline 978 * boundary, since the FW will write to them, while the driver 979 * writes to all other MTT entries. (The variable 980 * dev->caps.mtt_entry_sz below is really the MTT segment 981 * size, not the raw entry size) 982 */ 983 dev->caps.reserved_mtts = 984 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 985 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 986 987 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 988 init_hca->mtt_base, 989 dev->caps.mtt_entry_sz, 990 dev->caps.num_mtts, 991 dev->caps.reserved_mtts, 1, 0); 992 if (err) { 993 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 994 goto err_unmap_eq; 995 } 996 997 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 998 init_hca->dmpt_base, 999 dev_cap->dmpt_entry_sz, 1000 dev->caps.num_mpts, 1001 dev->caps.reserved_mrws, 1, 1); 1002 if (err) { 1003 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 1004 goto err_unmap_mtt; 1005 } 1006 1007 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1008 init_hca->qpc_base, 1009 dev_cap->qpc_entry_sz, 1010 dev->caps.num_qps, 1011 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1012 0, 0); 1013 if (err) { 1014 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 1015 goto err_unmap_dmpt; 1016 } 1017 1018 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1019 init_hca->auxc_base, 1020 dev_cap->aux_entry_sz, 1021 dev->caps.num_qps, 1022 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1023 0, 0); 1024 if (err) { 1025 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 1026 goto err_unmap_qp; 1027 } 1028 1029 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1030 init_hca->altc_base, 1031 dev_cap->altc_entry_sz, 1032 dev->caps.num_qps, 1033 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1034 0, 0); 1035 if (err) { 1036 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 1037 goto err_unmap_auxc; 1038 } 1039 1040 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1041 init_hca->rdmarc_base, 1042 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1043 dev->caps.num_qps, 1044 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1045 0, 0); 1046 if (err) { 1047 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1048 goto err_unmap_altc; 1049 } 1050 1051 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1052 init_hca->cqc_base, 1053 dev_cap->cqc_entry_sz, 1054 dev->caps.num_cqs, 1055 dev->caps.reserved_cqs, 0, 0); 1056 if (err) { 1057 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 1058 goto err_unmap_rdmarc; 1059 } 1060 1061 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1062 init_hca->srqc_base, 1063 dev_cap->srq_entry_sz, 1064 dev->caps.num_srqs, 1065 dev->caps.reserved_srqs, 0, 0); 1066 if (err) { 1067 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 1068 goto err_unmap_cq; 1069 } 1070 1071 /* 1072 * For flow steering device managed mode it is required to use 1073 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1074 * required, but for simplicity just map the whole multicast 1075 * group table now. 
The table isn't very big and it's a lot 1076 * easier than trying to track ref counts. 1077 */ 1078 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1079 init_hca->mc_base, 1080 mlx4_get_mgm_entry_size(dev), 1081 dev->caps.num_mgms + dev->caps.num_amgms, 1082 dev->caps.num_mgms + dev->caps.num_amgms, 1083 0, 0); 1084 if (err) { 1085 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 1086 goto err_unmap_srq; 1087 } 1088 1089 return 0; 1090 1091 err_unmap_srq: 1092 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1093 1094 err_unmap_cq: 1095 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1096 1097 err_unmap_rdmarc: 1098 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1099 1100 err_unmap_altc: 1101 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1102 1103 err_unmap_auxc: 1104 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1105 1106 err_unmap_qp: 1107 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1108 1109 err_unmap_dmpt: 1110 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1111 1112 err_unmap_mtt: 1113 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1114 1115 err_unmap_eq: 1116 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1117 1118 err_unmap_cmpt: 1119 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1120 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1121 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1122 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1123 1124 err_unmap_aux: 1125 mlx4_UNMAP_ICM_AUX(dev); 1126 1127 err_free_aux: 1128 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1129 1130 return err; 1131 } 1132 1133 static void mlx4_free_icms(struct mlx4_dev *dev) 1134 { 1135 struct mlx4_priv *priv = mlx4_priv(dev); 1136 1137 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1138 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1139 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1140 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1141 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1142 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1143 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1144 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1145 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1146 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1147 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1148 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1149 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1150 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1151 1152 mlx4_UNMAP_ICM_AUX(dev); 1153 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1154 } 1155 1156 static void mlx4_slave_exit(struct mlx4_dev *dev) 1157 { 1158 struct mlx4_priv *priv = mlx4_priv(dev); 1159 1160 mutex_lock(&priv->cmd.slave_cmd_mutex); 1161 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1162 mlx4_warn(dev, "Failed to close slave function.\n"); 1163 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1164 } 1165 1166 static int map_bf_area(struct mlx4_dev *dev) 1167 { 1168 struct mlx4_priv *priv = mlx4_priv(dev); 1169 resource_size_t bf_start; 1170 resource_size_t bf_len; 1171 int err = 0; 1172 1173 if (!dev->caps.bf_reg_size) 1174 return -ENXIO; 1175 1176 bf_start = pci_resource_start(dev->pdev, 2) + 1177 (dev->caps.num_uars << PAGE_SHIFT); 1178 bf_len = pci_resource_len(dev->pdev, 2) - 1179 (dev->caps.num_uars << PAGE_SHIFT); 1180 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1181 
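	/*
	 * Rough sketch of the BAR 2 layout implied by the bf_start/bf_len
	 * computation above:
	 *
	 *   pci_resource_start(pdev, 2) -> +---------------------------+
	 *                                  |  UAR pages                |
	 *                                  |  (num_uars << PAGE_SHIFT) |
	 *   bf_start -------------------> +---------------------------+
	 *                                  |  BlueFlame registers      |
	 *                                  |  (bf_len = rest of BAR 2) |
	 *                                  +---------------------------+
	 *
	 * The BlueFlame region is mapped write-combining via
	 * io_mapping_create_wc() rather than uncached.
	 */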
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* If we are in the middle of FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR. Retrying... "
					  "(try num: %d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
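			/*
			 * Note: an -EACCES return from QUERY_FW means this is
			 * not the primary physical function.  The caller,
			 * __mlx4_init_one(), catches that error, cleans up the
			 * command interface and restarts initialization in
			 * slave mode (see the slave_start label there).
			 */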
1303 return err; 1304 } 1305 1306 err = mlx4_load_fw(dev); 1307 if (err) { 1308 mlx4_err(dev, "Failed to start FW, aborting.\n"); 1309 return err; 1310 } 1311 1312 mlx4_cfg.log_pg_sz_m = 1; 1313 mlx4_cfg.log_pg_sz = 0; 1314 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1315 if (err) 1316 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1317 1318 err = mlx4_dev_cap(dev, &dev_cap); 1319 if (err) { 1320 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 1321 goto err_stop_fw; 1322 } 1323 1324 if (mlx4_is_master(dev)) 1325 mlx4_parav_master_pf_caps(dev); 1326 1327 priv->fs_hash_mode = MLX4_FS_L2_HASH; 1328 1329 switch (priv->fs_hash_mode) { 1330 case MLX4_FS_L2_HASH: 1331 init_hca.fs_hash_enable_bits = 0; 1332 break; 1333 1334 case MLX4_FS_L2_L3_L4_HASH: 1335 /* Enable flow steering with 1336 * udp unicast and tcp unicast 1337 */ 1338 init_hca.fs_hash_enable_bits = 1339 MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN; 1340 break; 1341 } 1342 1343 profile = default_profile; 1344 if (dev->caps.steering_mode == 1345 MLX4_STEERING_MODE_DEVICE_MANAGED) 1346 profile.num_mcg = MLX4_FS_NUM_MCG; 1347 1348 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 1349 &init_hca); 1350 if ((long long) icm_size < 0) { 1351 err = icm_size; 1352 goto err_stop_fw; 1353 } 1354 1355 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 1356 1357 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 1358 init_hca.uar_page_sz = PAGE_SHIFT - 12; 1359 1360 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1361 if (err) 1362 goto err_stop_fw; 1363 1364 err = mlx4_INIT_HCA(dev, &init_hca); 1365 if (err) { 1366 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1367 goto err_free_icm; 1368 } 1369 } else { 1370 err = mlx4_init_slave(dev); 1371 if (err) { 1372 mlx4_err(dev, "Failed to initialize slave\n"); 1373 return err; 1374 } 1375 1376 err = mlx4_slave_cap(dev); 1377 if (err) { 1378 mlx4_err(dev, "Failed to obtain slave caps\n"); 1379 goto err_close; 1380 } 1381 } 1382 1383 if (map_bf_area(dev)) 1384 mlx4_dbg(dev, "Failed to map blue flame area\n"); 1385 1386 /*Only the master set the ports, all the rest got it from it.*/ 1387 if (!mlx4_is_slave(dev)) 1388 mlx4_set_port_mask(dev); 1389 1390 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1391 if (err) { 1392 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 1393 goto unmap_bf; 1394 } 1395 1396 priv->eq_table.inta_pin = adapter.inta_pin; 1397 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 1398 1399 return 0; 1400 1401 unmap_bf: 1402 unmap_bf_area(dev); 1403 1404 err_close: 1405 mlx4_close_hca(dev); 1406 1407 err_free_icm: 1408 if (!mlx4_is_slave(dev)) 1409 mlx4_free_icms(dev); 1410 1411 err_stop_fw: 1412 if (!mlx4_is_slave(dev)) { 1413 mlx4_UNMAP_FA(dev); 1414 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1415 } 1416 return err; 1417 } 1418 1419 static int mlx4_init_counters_table(struct mlx4_dev *dev) 1420 { 1421 struct mlx4_priv *priv = mlx4_priv(dev); 1422 int nent; 1423 1424 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1425 return -ENOENT; 1426 1427 nent = dev->caps.max_counters; 1428 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 1429 } 1430 1431 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 1432 { 1433 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 1434 } 1435 1436 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1437 { 1438 struct mlx4_priv *priv = mlx4_priv(dev); 1439 1440 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1441 return -ENOENT; 1442 1443 *idx 
= mlx4_bitmap_alloc(&priv->counters_bitmap); 1444 if (*idx == -1) 1445 return -ENOMEM; 1446 1447 return 0; 1448 } 1449 1450 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1451 { 1452 u64 out_param; 1453 int err; 1454 1455 if (mlx4_is_mfunc(dev)) { 1456 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 1457 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 1458 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 1459 if (!err) 1460 *idx = get_param_l(&out_param); 1461 1462 return err; 1463 } 1464 return __mlx4_counter_alloc(dev, idx); 1465 } 1466 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 1467 1468 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1469 { 1470 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); 1471 return; 1472 } 1473 1474 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1475 { 1476 u64 in_param; 1477 1478 if (mlx4_is_mfunc(dev)) { 1479 set_param_l(&in_param, idx); 1480 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 1481 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 1482 MLX4_CMD_WRAPPED); 1483 return; 1484 } 1485 __mlx4_counter_free(dev, idx); 1486 } 1487 EXPORT_SYMBOL_GPL(mlx4_counter_free); 1488 1489 static int mlx4_setup_hca(struct mlx4_dev *dev) 1490 { 1491 struct mlx4_priv *priv = mlx4_priv(dev); 1492 int err; 1493 int port; 1494 __be32 ib_port_default_caps; 1495 1496 err = mlx4_init_uar_table(dev); 1497 if (err) { 1498 mlx4_err(dev, "Failed to initialize " 1499 "user access region table, aborting.\n"); 1500 return err; 1501 } 1502 1503 err = mlx4_uar_alloc(dev, &priv->driver_uar); 1504 if (err) { 1505 mlx4_err(dev, "Failed to allocate driver access region, " 1506 "aborting.\n"); 1507 goto err_uar_table_free; 1508 } 1509 1510 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1511 if (!priv->kar) { 1512 mlx4_err(dev, "Couldn't map kernel access region, " 1513 "aborting.\n"); 1514 err = -ENOMEM; 1515 goto err_uar_free; 1516 } 1517 1518 err = mlx4_init_pd_table(dev); 1519 if (err) { 1520 mlx4_err(dev, "Failed to initialize " 1521 "protection domain table, aborting.\n"); 1522 goto err_kar_unmap; 1523 } 1524 1525 err = mlx4_init_xrcd_table(dev); 1526 if (err) { 1527 mlx4_err(dev, "Failed to initialize " 1528 "reliable connection domain table, aborting.\n"); 1529 goto err_pd_table_free; 1530 } 1531 1532 err = mlx4_init_mr_table(dev); 1533 if (err) { 1534 mlx4_err(dev, "Failed to initialize " 1535 "memory region table, aborting.\n"); 1536 goto err_xrcd_table_free; 1537 } 1538 1539 err = mlx4_init_eq_table(dev); 1540 if (err) { 1541 mlx4_err(dev, "Failed to initialize " 1542 "event queue table, aborting.\n"); 1543 goto err_mr_table_free; 1544 } 1545 1546 err = mlx4_cmd_use_events(dev); 1547 if (err) { 1548 mlx4_err(dev, "Failed to switch to event-driven " 1549 "firmware commands, aborting.\n"); 1550 goto err_eq_table_free; 1551 } 1552 1553 err = mlx4_NOP(dev); 1554 if (err) { 1555 if (dev->flags & MLX4_FLAG_MSI_X) { 1556 mlx4_warn(dev, "NOP command failed to generate MSI-X " 1557 "interrupt IRQ %d).\n", 1558 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1559 mlx4_warn(dev, "Trying again without MSI-X.\n"); 1560 } else { 1561 mlx4_err(dev, "NOP command failed to generate interrupt " 1562 "(IRQ %d), aborting.\n", 1563 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1564 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 1565 } 1566 1567 goto err_cmd_poll; 1568 } 1569 1570 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 1571 1572 err = mlx4_init_cq_table(dev); 1573 if (err) { 1574 mlx4_err(dev, "Failed to initialize " 1575 
"completion queue table, aborting.\n"); 1576 goto err_cmd_poll; 1577 } 1578 1579 err = mlx4_init_srq_table(dev); 1580 if (err) { 1581 mlx4_err(dev, "Failed to initialize " 1582 "shared receive queue table, aborting.\n"); 1583 goto err_cq_table_free; 1584 } 1585 1586 err = mlx4_init_qp_table(dev); 1587 if (err) { 1588 mlx4_err(dev, "Failed to initialize " 1589 "queue pair table, aborting.\n"); 1590 goto err_srq_table_free; 1591 } 1592 1593 if (!mlx4_is_slave(dev)) { 1594 err = mlx4_init_mcg_table(dev); 1595 if (err) { 1596 mlx4_err(dev, "Failed to initialize " 1597 "multicast group table, aborting.\n"); 1598 goto err_qp_table_free; 1599 } 1600 } 1601 1602 err = mlx4_init_counters_table(dev); 1603 if (err && err != -ENOENT) { 1604 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1605 goto err_mcg_table_free; 1606 } 1607 1608 if (!mlx4_is_slave(dev)) { 1609 for (port = 1; port <= dev->caps.num_ports; port++) { 1610 ib_port_default_caps = 0; 1611 err = mlx4_get_port_ib_caps(dev, port, 1612 &ib_port_default_caps); 1613 if (err) 1614 mlx4_warn(dev, "failed to get port %d default " 1615 "ib capabilities (%d). Continuing " 1616 "with caps = 0\n", port, err); 1617 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 1618 1619 /* initialize per-slave default ib port capabilities */ 1620 if (mlx4_is_master(dev)) { 1621 int i; 1622 for (i = 0; i < dev->num_slaves; i++) { 1623 if (i == mlx4_master_func_num(dev)) 1624 continue; 1625 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 1626 ib_port_default_caps; 1627 } 1628 } 1629 1630 if (mlx4_is_mfunc(dev)) 1631 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 1632 else 1633 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 1634 1635 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 1636 dev->caps.pkey_table_len[port] : -1); 1637 if (err) { 1638 mlx4_err(dev, "Failed to set port %d, aborting\n", 1639 port); 1640 goto err_counters_table_free; 1641 } 1642 } 1643 } 1644 1645 return 0; 1646 1647 err_counters_table_free: 1648 mlx4_cleanup_counters_table(dev); 1649 1650 err_mcg_table_free: 1651 mlx4_cleanup_mcg_table(dev); 1652 1653 err_qp_table_free: 1654 mlx4_cleanup_qp_table(dev); 1655 1656 err_srq_table_free: 1657 mlx4_cleanup_srq_table(dev); 1658 1659 err_cq_table_free: 1660 mlx4_cleanup_cq_table(dev); 1661 1662 err_cmd_poll: 1663 mlx4_cmd_use_polling(dev); 1664 1665 err_eq_table_free: 1666 mlx4_cleanup_eq_table(dev); 1667 1668 err_mr_table_free: 1669 mlx4_cleanup_mr_table(dev); 1670 1671 err_xrcd_table_free: 1672 mlx4_cleanup_xrcd_table(dev); 1673 1674 err_pd_table_free: 1675 mlx4_cleanup_pd_table(dev); 1676 1677 err_kar_unmap: 1678 iounmap(priv->kar); 1679 1680 err_uar_free: 1681 mlx4_uar_free(dev, &priv->driver_uar); 1682 1683 err_uar_table_free: 1684 mlx4_cleanup_uar_table(dev); 1685 return err; 1686 } 1687 1688 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 1689 { 1690 struct mlx4_priv *priv = mlx4_priv(dev); 1691 struct msix_entry *entries; 1692 int nreq = min_t(int, dev->caps.num_ports * 1693 min_t(int, netif_get_num_default_rss_queues() + 1, 1694 MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); 1695 int err; 1696 int i; 1697 1698 if (msi_x) { 1699 /* In multifunction mode each function gets 2 msi-X vectors 1700 * one for data path completions anf the other for asynch events 1701 * or command completions */ 1702 if (mlx4_is_mfunc(dev)) { 1703 nreq = 2; 1704 } else { 1705 nreq = min_t(int, dev->caps.num_eqs - 1706 dev->caps.reserved_eqs, nreq); 1707 } 1708 1709 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1710 if (!entries) 
1711 goto no_msi; 1712 1713 for (i = 0; i < nreq; ++i) 1714 entries[i].entry = i; 1715 1716 retry: 1717 err = pci_enable_msix(dev->pdev, entries, nreq); 1718 if (err) { 1719 /* Try again if at least 2 vectors are available */ 1720 if (err > 1) { 1721 mlx4_info(dev, "Requested %d vectors, " 1722 "but only %d MSI-X vectors available, " 1723 "trying again\n", nreq, err); 1724 nreq = err; 1725 goto retry; 1726 } 1727 kfree(entries); 1728 goto no_msi; 1729 } 1730 1731 if (nreq < 1732 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { 1733 /*Working in legacy mode , all EQ's shared*/ 1734 dev->caps.comp_pool = 0; 1735 dev->caps.num_comp_vectors = nreq - 1; 1736 } else { 1737 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 1738 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 1739 } 1740 for (i = 0; i < nreq; ++i) 1741 priv->eq_table.eq[i].irq = entries[i].vector; 1742 1743 dev->flags |= MLX4_FLAG_MSI_X; 1744 1745 kfree(entries); 1746 return; 1747 } 1748 1749 no_msi: 1750 dev->caps.num_comp_vectors = 1; 1751 dev->caps.comp_pool = 0; 1752 1753 for (i = 0; i < 2; ++i) 1754 priv->eq_table.eq[i].irq = dev->pdev->irq; 1755 } 1756 1757 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 1758 { 1759 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 1760 int err = 0; 1761 1762 info->dev = dev; 1763 info->port = port; 1764 if (!mlx4_is_slave(dev)) { 1765 INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL); 1766 mlx4_init_mac_table(dev, &info->mac_table); 1767 mlx4_init_vlan_table(dev, &info->vlan_table); 1768 info->base_qpn = 1769 dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + 1770 (port - 1) * (1 << log_num_mac); 1771 } 1772 1773 sprintf(info->dev_name, "mlx4_port%d", port); 1774 info->port_attr.attr.name = info->dev_name; 1775 if (mlx4_is_mfunc(dev)) 1776 info->port_attr.attr.mode = S_IRUGO; 1777 else { 1778 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1779 info->port_attr.store = set_port_type; 1780 } 1781 info->port_attr.show = show_port_type; 1782 sysfs_attr_init(&info->port_attr.attr); 1783 1784 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1785 if (err) { 1786 mlx4_err(dev, "Failed to create file for port %d\n", port); 1787 info->port = -1; 1788 } 1789 1790 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 1791 info->port_mtu_attr.attr.name = info->dev_mtu_name; 1792 if (mlx4_is_mfunc(dev)) 1793 info->port_mtu_attr.attr.mode = S_IRUGO; 1794 else { 1795 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 1796 info->port_mtu_attr.store = set_port_ib_mtu; 1797 } 1798 info->port_mtu_attr.show = show_port_ib_mtu; 1799 sysfs_attr_init(&info->port_mtu_attr.attr); 1800 1801 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); 1802 if (err) { 1803 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 1804 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1805 info->port = -1; 1806 } 1807 1808 return err; 1809 } 1810 1811 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 1812 { 1813 if (info->port < 0) 1814 return; 1815 1816 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1817 device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); 1818 } 1819 1820 static int mlx4_init_steering(struct mlx4_dev *dev) 1821 { 1822 struct mlx4_priv *priv = mlx4_priv(dev); 1823 int num_entries = dev->caps.num_ports; 1824 int i, j; 1825 1826 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 1827 if (!priv->steer) 1828 return -ENOMEM; 1829 1830 for (i = 0; i < num_entries; i++) 1831 for (j = 0; 
j < MLX4_NUM_STEERS; j++) { 1832 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 1833 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 1834 } 1835 return 0; 1836 } 1837 1838 static void mlx4_clear_steering(struct mlx4_dev *dev) 1839 { 1840 struct mlx4_priv *priv = mlx4_priv(dev); 1841 struct mlx4_steer_index *entry, *tmp_entry; 1842 struct mlx4_promisc_qp *pqp, *tmp_pqp; 1843 int num_entries = dev->caps.num_ports; 1844 int i, j; 1845 1846 for (i = 0; i < num_entries; i++) { 1847 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1848 list_for_each_entry_safe(pqp, tmp_pqp, 1849 &priv->steer[i].promisc_qps[j], 1850 list) { 1851 list_del(&pqp->list); 1852 kfree(pqp); 1853 } 1854 list_for_each_entry_safe(entry, tmp_entry, 1855 &priv->steer[i].steer_entries[j], 1856 list) { 1857 list_del(&entry->list); 1858 list_for_each_entry_safe(pqp, tmp_pqp, 1859 &entry->duplicates, 1860 list) { 1861 list_del(&pqp->list); 1862 kfree(pqp); 1863 } 1864 kfree(entry); 1865 } 1866 } 1867 } 1868 kfree(priv->steer); 1869 } 1870 1871 static int extended_func_num(struct pci_dev *pdev) 1872 { 1873 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 1874 } 1875 1876 #define MLX4_OWNER_BASE 0x8069c 1877 #define MLX4_OWNER_SIZE 4 1878 1879 static int mlx4_get_ownership(struct mlx4_dev *dev) 1880 { 1881 void __iomem *owner; 1882 u32 ret; 1883 1884 if (pci_channel_offline(dev->pdev)) 1885 return -EIO; 1886 1887 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 1888 MLX4_OWNER_SIZE); 1889 if (!owner) { 1890 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1891 return -ENOMEM; 1892 } 1893 1894 ret = readl(owner); 1895 iounmap(owner); 1896 return (int) !!ret; 1897 } 1898 1899 static void mlx4_free_ownership(struct mlx4_dev *dev) 1900 { 1901 void __iomem *owner; 1902 1903 if (pci_channel_offline(dev->pdev)) 1904 return; 1905 1906 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 1907 MLX4_OWNER_SIZE); 1908 if (!owner) { 1909 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1910 return; 1911 } 1912 writel(0, owner); 1913 msleep(1000); 1914 iounmap(owner); 1915 } 1916 1917 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) 1918 { 1919 struct mlx4_priv *priv; 1920 struct mlx4_dev *dev; 1921 int err; 1922 int port; 1923 1924 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 1925 1926 err = pci_enable_device(pdev); 1927 if (err) { 1928 dev_err(&pdev->dev, "Cannot enable PCI device, " 1929 "aborting.\n"); 1930 return err; 1931 } 1932 if (num_vfs > MLX4_MAX_NUM_VF) { 1933 printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", 1934 num_vfs, MLX4_MAX_NUM_VF); 1935 return -EINVAL; 1936 } 1937 /* 1938 * Check for BARs. 1939 */ 1940 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 1941 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1942 dev_err(&pdev->dev, "Missing DCS, aborting." 
1943 "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 1944 pci_dev_data, pci_resource_flags(pdev, 0)); 1945 err = -ENODEV; 1946 goto err_disable_pdev; 1947 } 1948 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 1949 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 1950 err = -ENODEV; 1951 goto err_disable_pdev; 1952 } 1953 1954 err = pci_request_regions(pdev, DRV_NAME); 1955 if (err) { 1956 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 1957 goto err_disable_pdev; 1958 } 1959 1960 pci_set_master(pdev); 1961 1962 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1963 if (err) { 1964 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1965 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1966 if (err) { 1967 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1968 goto err_release_regions; 1969 } 1970 } 1971 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1972 if (err) { 1973 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1974 "consistent PCI DMA mask.\n"); 1975 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1976 if (err) { 1977 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1978 "aborting.\n"); 1979 goto err_release_regions; 1980 } 1981 } 1982 1983 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 1984 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 1985 1986 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1987 if (!priv) { 1988 dev_err(&pdev->dev, "Device struct alloc failed, " 1989 "aborting.\n"); 1990 err = -ENOMEM; 1991 goto err_release_regions; 1992 } 1993 1994 dev = &priv->dev; 1995 dev->pdev = pdev; 1996 INIT_LIST_HEAD(&priv->ctx_list); 1997 spin_lock_init(&priv->ctx_lock); 1998 1999 mutex_init(&priv->port_mutex); 2000 2001 INIT_LIST_HEAD(&priv->pgdir_list); 2002 mutex_init(&priv->pgdir_mutex); 2003 2004 INIT_LIST_HEAD(&priv->bf_list); 2005 mutex_init(&priv->bf_mutex); 2006 2007 dev->rev_id = pdev->revision; 2008 /* Detect if this device is a virtual function */ 2009 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2010 /* When acting as pf, we normally skip vfs unless explicitly 2011 * requested to probe them. */ 2012 if (num_vfs && extended_func_num(pdev) > probe_vf) { 2013 mlx4_warn(dev, "Skipping virtual function:%d\n", 2014 extended_func_num(pdev)); 2015 err = -ENODEV; 2016 goto err_free_dev; 2017 } 2018 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2019 dev->flags |= MLX4_FLAG_SLAVE; 2020 } else { 2021 /* We reset the device and enable SRIOV only for physical 2022 * devices. Try to claim ownership on the device; 2023 * if already taken, skip -- do not allow multiple PFs */ 2024 err = mlx4_get_ownership(dev); 2025 if (err) { 2026 if (err < 0) 2027 goto err_free_dev; 2028 else { 2029 mlx4_warn(dev, "Multiple PFs not yet supported." 2030 " Skipping PF.\n"); 2031 err = -EINVAL; 2032 goto err_free_dev; 2033 } 2034 } 2035 2036 if (num_vfs) { 2037 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); 2038 err = pci_enable_sriov(pdev, num_vfs); 2039 if (err) { 2040 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2041 err); 2042 err = 0; 2043 } else { 2044 mlx4_warn(dev, "Running in master mode\n"); 2045 dev->flags |= MLX4_FLAG_SRIOV | 2046 MLX4_FLAG_MASTER; 2047 dev->num_vfs = num_vfs; 2048 } 2049 } 2050 2051 /* 2052 * Now reset the HCA before we touch the PCI capabilities or 2053 * attempt a firmware command, since a boot ROM may have left 2054 * the HCA in an undefined state. 
	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them. */
		if (num_vfs && extended_func_num(pdev) > probe_vf) {
			mlx4_warn(dev, "Skipping virtual function:%d\n",
				  extended_func_num(pdev));
			err = -ENODEV;
			goto err_free_dev;
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported."
					  " Skipping PF.\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}

		if (num_vfs) {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
			err = pci_enable_sriov(pdev, num_vfs);
			if (err) {
				mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
					 err);
				err = 0;
			} else {
				mlx4_warn(dev, "Running in master mode\n");
				dev->flags |= MLX4_FLAG_SRIOV |
					      MLX4_FLAG_MASTER;
				dev->num_vfs = num_vfs;
			}
		}

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			goto err_rel_own;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			if (mlx4_multi_func_init(dev)) {
				mlx4_err(dev, "Failed to init slave mfunc"
					 " interface, aborting.\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (mlx4_multi_func_init(dev)) {
			mlx4_err(dev, "Failed to init master mfunc "
				 "interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		mlx4_err(dev, "INTx is not supported in multi-function mode,"
			 " aborting.\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_free_eq;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->pci_dev_data = pci_dev_data;
	pci_set_drvdata(pdev, dev);

	return 0;
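
/*
 * Error unwind: release resources in the reverse order of their
 * acquisition, starting from the label matching the failed step.
 */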
err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV)
		pci_disable_sriov(pdev);

err_rel_own:
	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id->driver_data);
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		/* In SR-IOV it is not allowed to unload the PF's driver
		 * while there are still active VFs. */
		if (mlx4_is_master(dev)) {
			if (mlx4_how_many_lives_vf(dev))
				printk(KERN_ERR "Removing PF when there are assigned VFs !!!\n");
		}
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev,
						   RES_TR_FREE_SLAVES_ONLY);

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_xrcd_table(dev);
		mlx4_cleanup_pd_table(dev);

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev,
						   RES_TR_FREE_STRUCTS_ONLY);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		if (!mlx4_is_slave(dev))
			mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		if (mlx4_is_master(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_close_hca(dev);
		if (mlx4_is_slave(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (dev->flags & MLX4_FLAG_SRIOV) {
			mlx4_warn(dev, "Disabling SR-IOV\n");
			pci_disable_sriov(pdev);
		}

		if (!mlx4_is_slave(dev))
			mlx4_free_ownership(dev);

		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);

		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int		  pci_dev_data;

	pci_dev_data = priv->pci_dev_data;
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, pci_dev_data);
}
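/*
 * PCI IDs handled by this driver: ConnectX, ConnectX-2 and ConnectX-3
 * HCAs. Entries flagged MLX4_PCI_DEV_IS_VF are SR-IOV virtual functions
 * and are probed in slave mode.
 */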
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_remove_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	int ret = __mlx4_init_one(pdev, 0);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one),
	.err_handler	= &mlx4_err_handler,
};
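/*
 * Sanity-check module parameters at load time. For illustration only
 * (hypothetical values), a load line exercising these checks might be:
 *
 *   modprobe mlx4_core log_num_mac=7 log_mtts_per_seg=3 port_type_array=1,1
 *
 * Out-of-range values cause mlx4_init() to fail with -EINVAL.
 */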
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check that the port type module parameters form a legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);