1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 45 #include <linux/mlx4/device.h> 46 #include <linux/mlx4/doorbell.h> 47 48 #include "mlx4.h" 49 #include "fw.h" 50 #include "icm.h" 51 52 MODULE_AUTHOR("Roland Dreier"); 53 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 54 MODULE_LICENSE("Dual BSD/GPL"); 55 MODULE_VERSION(DRV_VERSION); 56 57 struct workqueue_struct *mlx4_wq; 58 59 #ifdef CONFIG_MLX4_DEBUG 60 61 int mlx4_debug_level = 0; 62 module_param_named(debug_level, mlx4_debug_level, int, 0644); 63 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 64 65 #endif /* CONFIG_MLX4_DEBUG */ 66 67 #ifdef CONFIG_PCI_MSI 68 69 static int msi_x = 1; 70 module_param(msi_x, int, 0444); 71 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 72 73 #else /* CONFIG_PCI_MSI */ 74 75 #define msi_x (0) 76 77 #endif /* CONFIG_PCI_MSI */ 78 79 static int num_vfs; 80 module_param(num_vfs, int, 0444); 81 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); 82 83 static int probe_vf; 84 module_param(probe_vf, int, 0644); 85 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 86 87 int mlx4_log_num_mgm_entry_size = 10; 88 module_param_named(log_num_mgm_entry_size, 89 mlx4_log_num_mgm_entry_size, int, 0444); 90 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 91 " of qp per mcg, for example:" 92 " 10 gives 248.range: 9<=" 93 " log_num_mgm_entry_size <= 12"); 94 95 #define MLX4_VF (1 << 0) 96 97 #define HCA_GLOBAL_CAP_MASK 0 98 #define PF_CONTEXT_BEHAVIOUR_MASK 0 99 100 static char mlx4_version[] __devinitdata = 101 DRV_NAME ": Mellanox ConnectX core driver v" 102 DRV_VERSION " (" DRV_RELDATE ")\n"; 103 104 static struct mlx4_profile default_profile = { 105 .num_qp = 1 << 18, 106 .num_srq = 1 << 16, 107 .rdmarc_per_qp = 1 << 4, 108 .num_cq = 1 << 16, 109 .num_mcg = 1 << 13, 110 .num_mpt = 1 << 19, 111 .num_mtt = 1 << 20, /* It is really num mtt segements */ 112 }; 113 114 static int log_num_mac = 7; 115 module_param_named(log_num_mac, log_num_mac, int, 0444); 116 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 117 118 static int log_num_vlan; 119 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 120 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 121 /* Log2 max number of VLANs per ETH port (0-7) */ 122 #define MLX4_LOG_NUM_VLANS 7 123 124 static bool use_prio; 125 module_param_named(use_prio, use_prio, bool, 0444); 126 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 127 "(0/1, default 0)"); 128 129 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 130 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 131 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 132 133 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 134 static int arr_argc = 2; 135 module_param_array(port_type_array, int, &arr_argc, 0444); 136 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 137 "1 for IB, 2 for Ethernet"); 138 139 struct mlx4_port_config { 140 struct list_head list; 141 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 142 struct pci_dev *pdev; 143 }; 144 145 int mlx4_check_port_params(struct mlx4_dev *dev, 146 enum mlx4_port_type *port_type) 147 { 148 int i; 149 150 for (i = 0; i < dev->caps.num_ports - 1; i++) { 151 if (port_type[i] != port_type[i + 1]) { 152 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 153 mlx4_err(dev, "Only same port types supported " 154 "on this HCA, aborting.\n"); 155 return -EINVAL; 156 } 157 if (port_type[i] == MLX4_PORT_TYPE_ETH && 158 port_type[i + 1] == MLX4_PORT_TYPE_IB) 159 return -EINVAL; 160 } 161 } 162 163 for (i = 0; i < dev->caps.num_ports; i++) { 164 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 165 mlx4_err(dev, "Requested port type for port %d is not " 166 "supported on this HCA\n", i + 1); 167 return -EINVAL; 168 } 169 } 170 return 0; 171 } 172 173 static void mlx4_set_port_mask(struct mlx4_dev *dev) 174 { 175 int i; 176 177 for (i = 1; i <= dev->caps.num_ports; ++i) 178 dev->caps.port_mask[i] = dev->caps.port_type[i]; 179 } 180 181 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 182 { 183 int err; 184 int i; 185 186 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 187 if (err) { 188 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 189 return err; 190 } 191 192 if (dev_cap->min_page_sz > PAGE_SIZE) { 193 mlx4_err(dev, "HCA minimum page size of %d bigger than " 194 "kernel PAGE_SIZE of %ld, aborting.\n", 195 dev_cap->min_page_sz, PAGE_SIZE); 196 return -ENODEV; 197 } 198 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 199 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 200 "aborting.\n", 201 dev_cap->num_ports, MLX4_MAX_PORTS); 202 return -ENODEV; 203 } 204 205 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 206 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " 207 "PCI resource 2 size of 0x%llx, aborting.\n", 208 dev_cap->uar_size, 209 (unsigned long long) pci_resource_len(dev->pdev, 2)); 210 return -ENODEV; 211 } 212 213 dev->caps.num_ports = dev_cap->num_ports; 214 dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; 215 for (i = 1; i <= dev->caps.num_ports; ++i) { 216 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 217 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; 218 dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i]; 219 dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i]; 220 /* set gid and pkey table operating lengths by default 221 * to non-sriov values */ 222 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 223 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 224 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 225 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 226 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 227 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 228 dev->caps.suggested_type[i] = dev_cap->suggested_type[i]; 229 dev->caps.default_sense[i] = dev_cap->default_sense[i]; 230 dev->caps.trans_type[i] = dev_cap->trans_type[i]; 231 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; 232 dev->caps.wavelength[i] = dev_cap->wavelength[i]; 233 dev->caps.trans_code[i] = dev_cap->trans_code[i]; 234 } 235 236 dev->caps.uar_page_size = PAGE_SIZE; 237 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 238 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 239 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 240 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 241 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 242 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 243 dev->caps.max_wqes = dev_cap->max_qp_sz; 244 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 245 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 246 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 247 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 248 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 249 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 250 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 251 /* 252 * Subtract 1 from the limit because we need to allocate a 253 * spare CQE so the HCA HW can tell the difference between an 254 * empty CQ and a full CQ. 255 */ 256 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 257 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 258 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 259 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 260 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 261 262 /* The first 128 UARs are used for EQ doorbells */ 263 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 264 dev->caps.reserved_pds = dev_cap->reserved_pds; 265 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 266 dev_cap->reserved_xrcds : 0; 267 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 268 dev_cap->max_xrcds : 0; 269 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 270 271 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 272 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 273 dev->caps.flags = dev_cap->flags; 274 dev->caps.flags2 = dev_cap->flags2; 275 dev->caps.bmme_flags = dev_cap->bmme_flags; 276 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 277 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 278 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 279 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 280 281 /* Sense port always allowed on supported devices for ConnectX1 and 2 */ 282 if (dev->pdev->device != 0x1003) 283 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 284 285 dev->caps.log_num_macs = log_num_mac; 286 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 287 dev->caps.log_num_prios = use_prio ? 3 : 0; 288 289 for (i = 1; i <= dev->caps.num_ports; ++i) { 290 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 291 if (dev->caps.supported_type[i]) { 292 /* if only ETH is supported - assign ETH */ 293 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 294 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 295 /* if only IB is supported, assign IB */ 296 else if (dev->caps.supported_type[i] == 297 MLX4_PORT_TYPE_IB) 298 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 299 else { 300 /* if IB and ETH are supported, we set the port 301 * type according to user selection of port type; 302 * if user selected none, take the FW hint */ 303 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 304 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 305 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 306 else 307 dev->caps.port_type[i] = port_type_array[i - 1]; 308 } 309 } 310 /* 311 * Link sensing is allowed on the port if 3 conditions are true: 312 * 1. Both protocols are supported on the port. 313 * 2. Different types are supported on the port 314 * 3. FW declared that it supports link sensing 315 */ 316 mlx4_priv(dev)->sense.sense_allowed[i] = 317 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 318 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 319 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 320 321 /* 322 * If "default_sense" bit is set, we move the port to "AUTO" mode 323 * and perform sense_port FW command to try and set the correct 324 * port type from beginning 325 */ 326 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 327 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 328 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 329 mlx4_SENSE_PORT(dev, i, &sensed_port); 330 if (sensed_port != MLX4_PORT_TYPE_NONE) 331 dev->caps.port_type[i] = sensed_port; 332 } else { 333 dev->caps.possible_type[i] = dev->caps.port_type[i]; 334 } 335 336 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 337 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 338 mlx4_warn(dev, "Requested number of MACs is too much " 339 "for port %d, reducing to %d.\n", 340 i, 1 << dev->caps.log_num_macs); 341 } 342 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { 343 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; 344 mlx4_warn(dev, "Requested number of VLANs is too much " 345 "for port %d, reducing to %d.\n", 346 i, 1 << dev->caps.log_num_vlans); 347 } 348 } 349 350 dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); 351 352 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 353 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 354 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 355 (1 << dev->caps.log_num_macs) * 356 (1 << dev->caps.log_num_vlans) * 357 (1 << dev->caps.log_num_prios) * 358 dev->caps.num_ports; 359 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 360 361 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 362 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 363 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 364 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 365 366 return 0; 367 } 368 /*The function checks if there are live vf, return the num of them*/ 369 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 370 { 371 struct mlx4_priv *priv = mlx4_priv(dev); 372 struct mlx4_slave_state *s_state; 373 int i; 374 int ret = 0; 375 376 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 377 s_state = &priv->mfunc.master.slave_state[i]; 378 if (s_state->active && s_state->last_cmd != 379 MLX4_COMM_CMD_RESET) { 380 mlx4_warn(dev, "%s: slave: %d is still active\n", 381 __func__, i); 382 ret++; 383 } 384 } 385 return ret; 386 } 387 388 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 389 { 390 u32 qk = MLX4_RESERVED_QKEY_BASE; 391 if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 392 qpn < dev->caps.sqp_start) 393 return -EINVAL; 394 395 if (qpn >= dev->caps.base_tunnel_sqpn) 396 /* tunnel qp */ 397 qk += qpn - dev->caps.base_tunnel_sqpn; 398 else 399 qk += qpn - dev->caps.sqp_start; 400 *qkey = qk; 401 return 0; 402 } 403 EXPORT_SYMBOL(mlx4_get_parav_qkey); 404 405 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 406 { 407 struct mlx4_priv *priv = mlx4_priv(dev); 408 struct mlx4_slave_state *s_slave; 409 410 if (!mlx4_is_master(dev)) 411 return 0; 412 413 s_slave = &priv->mfunc.master.slave_state[slave]; 414 return !!s_slave->active; 415 } 416 EXPORT_SYMBOL(mlx4_is_slave_active); 417 418 static int mlx4_slave_cap(struct mlx4_dev *dev) 419 { 420 int err; 421 u32 page_size; 422 struct mlx4_dev_cap dev_cap; 423 struct mlx4_func_cap func_cap; 424 struct mlx4_init_hca_param hca_param; 425 int i; 426 427 memset(&hca_param, 0, sizeof(hca_param)); 428 err = mlx4_QUERY_HCA(dev, &hca_param); 429 if (err) { 430 mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); 431 return err; 432 } 433 434 /*fail if the hca has an unknown capability */ 435 if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != 436 HCA_GLOBAL_CAP_MASK) { 437 mlx4_err(dev, "Unknown hca global capabilities\n"); 438 return -ENOSYS; 439 } 440 441 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 442 443 memset(&dev_cap, 0, sizeof(dev_cap)); 444 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 445 err = mlx4_dev_cap(dev, &dev_cap); 446 if (err) { 447 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 448 return err; 449 } 450 451 err = mlx4_QUERY_FW(dev); 452 if (err) 453 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); 454 455 page_size = ~dev->caps.page_size_cap + 1; 456 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 457 if (page_size > PAGE_SIZE) { 458 mlx4_err(dev, "HCA minimum page size of %d bigger than " 459 "kernel PAGE_SIZE of %ld, aborting.\n", 460 page_size, PAGE_SIZE); 461 return -ENODEV; 462 } 463 464 /* slave gets uar page size from QUERY_HCA fw command */ 465 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 466 467 /* TODO: relax this assumption */ 468 if (dev->caps.uar_page_size != PAGE_SIZE) { 469 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 470 dev->caps.uar_page_size, PAGE_SIZE); 471 return -ENODEV; 472 } 473 474 memset(&func_cap, 0, sizeof(func_cap)); 475 err = mlx4_QUERY_FUNC_CAP(dev, &func_cap); 476 if (err) { 477 mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n"); 478 return err; 479 } 480 481 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 482 PF_CONTEXT_BEHAVIOUR_MASK) { 483 mlx4_err(dev, "Unknown pf context behaviour\n"); 484 return -ENOSYS; 485 } 486 487 dev->caps.num_ports = func_cap.num_ports; 488 dev->caps.num_qps = func_cap.qp_quota; 489 dev->caps.num_srqs = func_cap.srq_quota; 490 dev->caps.num_cqs = func_cap.cq_quota; 491 dev->caps.num_eqs = func_cap.max_eq; 492 dev->caps.reserved_eqs = func_cap.reserved_eq; 493 dev->caps.num_mpts = func_cap.mpt_quota; 494 dev->caps.num_mtts = func_cap.mtt_quota; 495 dev->caps.num_pds = MLX4_NUM_PDS; 496 dev->caps.num_mgms = 0; 497 dev->caps.num_amgms = 0; 498 499 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 500 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 501 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 502 return -ENODEV; 503 } 504 505 for (i = 1; i <= dev->caps.num_ports; ++i) { 506 dev->caps.port_mask[i] = dev->caps.port_type[i]; 507 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 508 &dev->caps.gid_table_len[i], 509 &dev->caps.pkey_table_len[i])) 510 return -ENODEV; 511 } 512 513 if (dev->caps.uar_page_size * (dev->caps.num_uars - 514 dev->caps.reserved_uars) > 515 pci_resource_len(dev->pdev, 2)) { 516 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " 517 "PCI resource 2 size of 0x%llx, aborting.\n", 518 dev->caps.uar_page_size * dev->caps.num_uars, 519 (unsigned long long) pci_resource_len(dev->pdev, 2)); 520 return -ENODEV; 521 } 522 523 return 0; 524 } 525 526 /* 527 * Change the port configuration of the device. 528 * Every user of this function must hold the port mutex. 529 */ 530 int mlx4_change_port_types(struct mlx4_dev *dev, 531 enum mlx4_port_type *port_types) 532 { 533 int err = 0; 534 int change = 0; 535 int port; 536 537 for (port = 0; port < dev->caps.num_ports; port++) { 538 /* Change the port type only if the new type is different 539 * from the current, and not set to Auto */ 540 if (port_types[port] != dev->caps.port_type[port + 1]) 541 change = 1; 542 } 543 if (change) { 544 mlx4_unregister_device(dev); 545 for (port = 1; port <= dev->caps.num_ports; port++) { 546 mlx4_CLOSE_PORT(dev, port); 547 dev->caps.port_type[port] = port_types[port - 1]; 548 err = mlx4_SET_PORT(dev, port, -1); 549 if (err) { 550 mlx4_err(dev, "Failed to set port %d, " 551 "aborting\n", port); 552 goto out; 553 } 554 } 555 mlx4_set_port_mask(dev); 556 err = mlx4_register_device(dev); 557 } 558 559 out: 560 return err; 561 } 562 563 static ssize_t show_port_type(struct device *dev, 564 struct device_attribute *attr, 565 char *buf) 566 { 567 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 568 port_attr); 569 struct mlx4_dev *mdev = info->dev; 570 char type[8]; 571 572 sprintf(type, "%s", 573 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 574 "ib" : "eth"); 575 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 576 sprintf(buf, "auto (%s)\n", type); 577 else 578 sprintf(buf, "%s\n", type); 579 580 return strlen(buf); 581 } 582 583 static ssize_t set_port_type(struct device *dev, 584 struct device_attribute *attr, 585 const char *buf, size_t count) 586 { 587 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 588 port_attr); 589 struct mlx4_dev *mdev = info->dev; 590 struct mlx4_priv *priv = mlx4_priv(mdev); 591 enum mlx4_port_type types[MLX4_MAX_PORTS]; 592 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 593 int i; 594 int err = 0; 595 596 if (!strcmp(buf, "ib\n")) 597 info->tmp_type = MLX4_PORT_TYPE_IB; 598 else if (!strcmp(buf, "eth\n")) 599 info->tmp_type = MLX4_PORT_TYPE_ETH; 600 else if (!strcmp(buf, "auto\n")) 601 info->tmp_type = MLX4_PORT_TYPE_AUTO; 602 else { 603 mlx4_err(mdev, "%s is not supported port type\n", buf); 604 return -EINVAL; 605 } 606 607 mlx4_stop_sense(mdev); 608 mutex_lock(&priv->port_mutex); 609 /* Possible type is always the one that was delivered */ 610 mdev->caps.possible_type[info->port] = info->tmp_type; 611 612 for (i = 0; i < mdev->caps.num_ports; i++) { 613 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 614 mdev->caps.possible_type[i+1]; 615 if (types[i] == MLX4_PORT_TYPE_AUTO) 616 types[i] = mdev->caps.port_type[i+1]; 617 } 618 619 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 620 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 621 for (i = 1; i <= mdev->caps.num_ports; i++) { 622 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 623 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 624 err = -EINVAL; 625 } 626 } 627 } 628 if (err) { 629 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 630 "Set only 'eth' or 'ib' for both ports " 631 "(should be the same)\n"); 632 goto out; 633 } 634 635 mlx4_do_sense_ports(mdev, new_types, types); 636 637 err = mlx4_check_port_params(mdev, new_types); 638 if (err) 639 goto out; 640 641 /* We are about to apply the changes after the configuration 642 * was verified, no need to remember the temporary types 643 * any more */ 644 for (i = 0; i < mdev->caps.num_ports; i++) 645 priv->port[i + 1].tmp_type = 0; 646 647 err = mlx4_change_port_types(mdev, new_types); 648 649 out: 650 mlx4_start_sense(mdev); 651 mutex_unlock(&priv->port_mutex); 652 return err ? err : count; 653 } 654 655 enum ibta_mtu { 656 IB_MTU_256 = 1, 657 IB_MTU_512 = 2, 658 IB_MTU_1024 = 3, 659 IB_MTU_2048 = 4, 660 IB_MTU_4096 = 5 661 }; 662 663 static inline int int_to_ibta_mtu(int mtu) 664 { 665 switch (mtu) { 666 case 256: return IB_MTU_256; 667 case 512: return IB_MTU_512; 668 case 1024: return IB_MTU_1024; 669 case 2048: return IB_MTU_2048; 670 case 4096: return IB_MTU_4096; 671 default: return -1; 672 } 673 } 674 675 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 676 { 677 switch (mtu) { 678 case IB_MTU_256: return 256; 679 case IB_MTU_512: return 512; 680 case IB_MTU_1024: return 1024; 681 case IB_MTU_2048: return 2048; 682 case IB_MTU_4096: return 4096; 683 default: return -1; 684 } 685 } 686 687 static ssize_t show_port_ib_mtu(struct device *dev, 688 struct device_attribute *attr, 689 char *buf) 690 { 691 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 692 port_mtu_attr); 693 struct mlx4_dev *mdev = info->dev; 694 695 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 696 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 697 698 sprintf(buf, "%d\n", 699 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 700 return strlen(buf); 701 } 702 703 static ssize_t set_port_ib_mtu(struct device *dev, 704 struct device_attribute *attr, 705 const char *buf, size_t count) 706 { 707 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 708 port_mtu_attr); 709 struct mlx4_dev *mdev = info->dev; 710 struct mlx4_priv *priv = mlx4_priv(mdev); 711 int err, port, mtu, ibta_mtu = -1; 712 713 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 714 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 715 return -EINVAL; 716 } 717 718 err = sscanf(buf, "%d", &mtu); 719 if (err > 0) 720 ibta_mtu = int_to_ibta_mtu(mtu); 721 722 if (err <= 0 || ibta_mtu < 0) { 723 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 724 return -EINVAL; 725 } 726 727 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 728 729 mlx4_stop_sense(mdev); 730 mutex_lock(&priv->port_mutex); 731 mlx4_unregister_device(mdev); 732 for (port = 1; port <= mdev->caps.num_ports; port++) { 733 mlx4_CLOSE_PORT(mdev, port); 734 err = mlx4_SET_PORT(mdev, port, -1); 735 if (err) { 736 mlx4_err(mdev, "Failed to set port %d, " 737 "aborting\n", port); 738 goto err_set_port; 739 } 740 } 741 err = mlx4_register_device(mdev); 742 err_set_port: 743 mutex_unlock(&priv->port_mutex); 744 mlx4_start_sense(mdev); 745 return err ? err : count; 746 } 747 748 static int mlx4_load_fw(struct mlx4_dev *dev) 749 { 750 struct mlx4_priv *priv = mlx4_priv(dev); 751 int err; 752 753 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 754 GFP_HIGHUSER | __GFP_NOWARN, 0); 755 if (!priv->fw.fw_icm) { 756 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 757 return -ENOMEM; 758 } 759 760 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 761 if (err) { 762 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 763 goto err_free; 764 } 765 766 err = mlx4_RUN_FW(dev); 767 if (err) { 768 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 769 goto err_unmap_fa; 770 } 771 772 return 0; 773 774 err_unmap_fa: 775 mlx4_UNMAP_FA(dev); 776 777 err_free: 778 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 779 return err; 780 } 781 782 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 783 int cmpt_entry_sz) 784 { 785 struct mlx4_priv *priv = mlx4_priv(dev); 786 int err; 787 int num_eqs; 788 789 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 790 cmpt_base + 791 ((u64) (MLX4_CMPT_TYPE_QP * 792 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 793 cmpt_entry_sz, dev->caps.num_qps, 794 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 795 0, 0); 796 if (err) 797 goto err; 798 799 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 800 cmpt_base + 801 ((u64) (MLX4_CMPT_TYPE_SRQ * 802 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 803 cmpt_entry_sz, dev->caps.num_srqs, 804 dev->caps.reserved_srqs, 0, 0); 805 if (err) 806 goto err_qp; 807 808 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 809 cmpt_base + 810 ((u64) (MLX4_CMPT_TYPE_CQ * 811 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 812 cmpt_entry_sz, dev->caps.num_cqs, 813 dev->caps.reserved_cqs, 0, 0); 814 if (err) 815 goto err_srq; 816 817 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : 818 dev->caps.num_eqs; 819 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 820 cmpt_base + 821 ((u64) (MLX4_CMPT_TYPE_EQ * 822 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 823 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 824 if (err) 825 goto err_cq; 826 827 return 0; 828 829 err_cq: 830 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 831 832 err_srq: 833 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 834 835 err_qp: 836 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 837 838 err: 839 return err; 840 } 841 842 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 843 struct mlx4_init_hca_param *init_hca, u64 icm_size) 844 { 845 struct mlx4_priv *priv = mlx4_priv(dev); 846 u64 aux_pages; 847 int num_eqs; 848 int err; 849 850 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 851 if (err) { 852 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 853 return err; 854 } 855 856 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 857 (unsigned long long) icm_size >> 10, 858 (unsigned long long) aux_pages << 2); 859 860 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 861 GFP_HIGHUSER | __GFP_NOWARN, 0); 862 if (!priv->fw.aux_icm) { 863 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 864 return -ENOMEM; 865 } 866 867 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 868 if (err) { 869 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 870 goto err_free_aux; 871 } 872 873 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 874 if (err) { 875 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 876 goto err_unmap_aux; 877 } 878 879 880 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : 881 dev->caps.num_eqs; 882 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 883 init_hca->eqc_base, dev_cap->eqc_entry_sz, 884 num_eqs, num_eqs, 0, 0); 885 if (err) { 886 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 887 goto err_unmap_cmpt; 888 } 889 890 /* 891 * Reserved MTT entries must be aligned up to a cacheline 892 * boundary, since the FW will write to them, while the driver 893 * writes to all other MTT entries. (The variable 894 * dev->caps.mtt_entry_sz below is really the MTT segment 895 * size, not the raw entry size) 896 */ 897 dev->caps.reserved_mtts = 898 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 899 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 900 901 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 902 init_hca->mtt_base, 903 dev->caps.mtt_entry_sz, 904 dev->caps.num_mtts, 905 dev->caps.reserved_mtts, 1, 0); 906 if (err) { 907 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 908 goto err_unmap_eq; 909 } 910 911 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 912 init_hca->dmpt_base, 913 dev_cap->dmpt_entry_sz, 914 dev->caps.num_mpts, 915 dev->caps.reserved_mrws, 1, 1); 916 if (err) { 917 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 918 goto err_unmap_mtt; 919 } 920 921 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 922 init_hca->qpc_base, 923 dev_cap->qpc_entry_sz, 924 dev->caps.num_qps, 925 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 926 0, 0); 927 if (err) { 928 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 929 goto err_unmap_dmpt; 930 } 931 932 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 933 init_hca->auxc_base, 934 dev_cap->aux_entry_sz, 935 dev->caps.num_qps, 936 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 937 0, 0); 938 if (err) { 939 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 940 goto err_unmap_qp; 941 } 942 943 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 944 init_hca->altc_base, 945 dev_cap->altc_entry_sz, 946 dev->caps.num_qps, 947 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 948 0, 0); 949 if (err) { 950 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 951 goto err_unmap_auxc; 952 } 953 954 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 955 init_hca->rdmarc_base, 956 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 957 dev->caps.num_qps, 958 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 959 0, 0); 960 if (err) { 961 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 962 goto err_unmap_altc; 963 } 964 965 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 966 init_hca->cqc_base, 967 dev_cap->cqc_entry_sz, 968 dev->caps.num_cqs, 969 dev->caps.reserved_cqs, 0, 0); 970 if (err) { 971 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 972 goto err_unmap_rdmarc; 973 } 974 975 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 976 init_hca->srqc_base, 977 dev_cap->srq_entry_sz, 978 dev->caps.num_srqs, 979 dev->caps.reserved_srqs, 0, 0); 980 if (err) { 981 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 982 goto err_unmap_cq; 983 } 984 985 /* 986 * It's not strictly required, but for simplicity just map the 987 * whole multicast group table now. The table isn't very big 988 * and it's a lot easier than trying to track ref counts. 989 */ 990 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 991 init_hca->mc_base, 992 mlx4_get_mgm_entry_size(dev), 993 dev->caps.num_mgms + dev->caps.num_amgms, 994 dev->caps.num_mgms + dev->caps.num_amgms, 995 0, 0); 996 if (err) { 997 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 998 goto err_unmap_srq; 999 } 1000 1001 return 0; 1002 1003 err_unmap_srq: 1004 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1005 1006 err_unmap_cq: 1007 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1008 1009 err_unmap_rdmarc: 1010 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1011 1012 err_unmap_altc: 1013 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1014 1015 err_unmap_auxc: 1016 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1017 1018 err_unmap_qp: 1019 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1020 1021 err_unmap_dmpt: 1022 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1023 1024 err_unmap_mtt: 1025 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1026 1027 err_unmap_eq: 1028 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1029 1030 err_unmap_cmpt: 1031 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1032 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1033 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1034 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1035 1036 err_unmap_aux: 1037 mlx4_UNMAP_ICM_AUX(dev); 1038 1039 err_free_aux: 1040 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1041 1042 return err; 1043 } 1044 1045 static void mlx4_free_icms(struct mlx4_dev *dev) 1046 { 1047 struct mlx4_priv *priv = mlx4_priv(dev); 1048 1049 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1050 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1051 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1052 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1053 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1054 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1055 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1056 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1057 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1058 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1059 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1060 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1061 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1062 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1063 1064 mlx4_UNMAP_ICM_AUX(dev); 1065 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1066 } 1067 1068 static void mlx4_slave_exit(struct mlx4_dev *dev) 1069 { 1070 struct mlx4_priv *priv = mlx4_priv(dev); 1071 1072 down(&priv->cmd.slave_sem); 1073 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1074 mlx4_warn(dev, "Failed to close slave function.\n"); 1075 up(&priv->cmd.slave_sem); 1076 } 1077 1078 static int map_bf_area(struct mlx4_dev *dev) 1079 { 1080 struct mlx4_priv *priv = mlx4_priv(dev); 1081 resource_size_t bf_start; 1082 resource_size_t bf_len; 1083 int err = 0; 1084 1085 if (!dev->caps.bf_reg_size) 1086 return -ENXIO; 1087 1088 bf_start = pci_resource_start(dev->pdev, 2) + 1089 (dev->caps.num_uars << PAGE_SHIFT); 1090 bf_len = pci_resource_len(dev->pdev, 2) - 1091 (dev->caps.num_uars << PAGE_SHIFT); 1092 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1093 if (!priv->bf_mapping) 1094 err = -ENOMEM; 1095 1096 return err; 1097 } 1098 1099 static void unmap_bf_area(struct mlx4_dev *dev) 1100 { 1101 if (mlx4_priv(dev)->bf_mapping) 1102 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1103 } 1104 1105 static void mlx4_close_hca(struct mlx4_dev *dev) 1106 { 1107 unmap_bf_area(dev); 1108 if (mlx4_is_slave(dev)) 1109 mlx4_slave_exit(dev); 1110 else { 1111 mlx4_CLOSE_HCA(dev, 0); 1112 mlx4_free_icms(dev); 1113 mlx4_UNMAP_FA(dev); 1114 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1115 } 1116 } 1117 1118 static int mlx4_init_slave(struct mlx4_dev *dev) 1119 { 1120 struct mlx4_priv *priv = mlx4_priv(dev); 1121 u64 dma = (u64) priv->mfunc.vhcr_dma; 1122 int num_of_reset_retries = NUM_OF_RESET_RETRIES; 1123 int ret_from_reset = 0; 1124 u32 slave_read; 1125 u32 cmd_channel_ver; 1126 1127 down(&priv->cmd.slave_sem); 1128 priv->cmd.max_cmds = 1; 1129 mlx4_warn(dev, "Sending reset\n"); 1130 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1131 MLX4_COMM_TIME); 1132 /* if we are in the middle of flr the slave will try 1133 * NUM_OF_RESET_RETRIES times before leaving.*/ 1134 if (ret_from_reset) { 1135 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1136 msleep(SLEEP_TIME_IN_RESET); 1137 while (ret_from_reset && num_of_reset_retries) { 1138 mlx4_warn(dev, "slave is currently in the" 1139 "middle of FLR. retrying..." 1140 "(try num:%d)\n", 1141 (NUM_OF_RESET_RETRIES - 1142 num_of_reset_retries + 1)); 1143 ret_from_reset = 1144 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 1145 0, MLX4_COMM_TIME); 1146 num_of_reset_retries = num_of_reset_retries - 1; 1147 } 1148 } else 1149 goto err; 1150 } 1151 1152 /* check the driver version - the slave I/F revision 1153 * must match the master's */ 1154 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1155 cmd_channel_ver = mlx4_comm_get_version(); 1156 1157 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1158 MLX4_COMM_GET_IF_REV(slave_read)) { 1159 mlx4_err(dev, "slave driver version is not supported" 1160 " by the master\n"); 1161 goto err; 1162 } 1163 1164 mlx4_warn(dev, "Sending vhcr0\n"); 1165 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1166 MLX4_COMM_TIME)) 1167 goto err; 1168 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1169 MLX4_COMM_TIME)) 1170 goto err; 1171 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1172 MLX4_COMM_TIME)) 1173 goto err; 1174 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) 1175 goto err; 1176 up(&priv->cmd.slave_sem); 1177 return 0; 1178 1179 err: 1180 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); 1181 up(&priv->cmd.slave_sem); 1182 return -EIO; 1183 } 1184 1185 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 1186 { 1187 int i; 1188 1189 for (i = 1; i <= dev->caps.num_ports; i++) { 1190 dev->caps.gid_table_len[i] = 1; 1191 dev->caps.pkey_table_len[i] = 1192 dev->phys_caps.pkey_phys_table_len[i] - 1; 1193 } 1194 } 1195 1196 static int mlx4_init_hca(struct mlx4_dev *dev) 1197 { 1198 struct mlx4_priv *priv = mlx4_priv(dev); 1199 struct mlx4_adapter adapter; 1200 struct mlx4_dev_cap dev_cap; 1201 struct mlx4_mod_stat_cfg mlx4_cfg; 1202 struct mlx4_profile profile; 1203 struct mlx4_init_hca_param init_hca; 1204 u64 icm_size; 1205 int err; 1206 1207 if (!mlx4_is_slave(dev)) { 1208 err = mlx4_QUERY_FW(dev); 1209 if (err) { 1210 if (err == -EACCES) 1211 mlx4_info(dev, "non-primary physical function, skipping.\n"); 1212 else 1213 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 1214 goto unmap_bf; 1215 } 1216 1217 err = mlx4_load_fw(dev); 1218 if (err) { 1219 mlx4_err(dev, "Failed to start FW, aborting.\n"); 1220 goto unmap_bf; 1221 } 1222 1223 mlx4_cfg.log_pg_sz_m = 1; 1224 mlx4_cfg.log_pg_sz = 0; 1225 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1226 if (err) 1227 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1228 1229 err = mlx4_dev_cap(dev, &dev_cap); 1230 if (err) { 1231 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 1232 goto err_stop_fw; 1233 } 1234 1235 if (mlx4_is_master(dev)) 1236 mlx4_parav_master_pf_caps(dev); 1237 1238 profile = default_profile; 1239 1240 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 1241 &init_hca); 1242 if ((long long) icm_size < 0) { 1243 err = icm_size; 1244 goto err_stop_fw; 1245 } 1246 1247 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 1248 1249 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 1250 init_hca.uar_page_sz = PAGE_SHIFT - 12; 1251 1252 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1253 if (err) 1254 goto err_stop_fw; 1255 1256 err = mlx4_INIT_HCA(dev, &init_hca); 1257 if (err) { 1258 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1259 goto err_free_icm; 1260 } 1261 } else { 1262 err = mlx4_init_slave(dev); 1263 if (err) { 1264 mlx4_err(dev, "Failed to initialize slave\n"); 1265 goto unmap_bf; 1266 } 1267 1268 err = mlx4_slave_cap(dev); 1269 if (err) { 1270 mlx4_err(dev, "Failed to obtain slave caps\n"); 1271 goto err_close; 1272 } 1273 } 1274 1275 if (map_bf_area(dev)) 1276 mlx4_dbg(dev, "Failed to map blue flame area\n"); 1277 1278 /*Only the master set the ports, all the rest got it from it.*/ 1279 if (!mlx4_is_slave(dev)) 1280 mlx4_set_port_mask(dev); 1281 1282 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1283 if (err) { 1284 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 1285 goto err_close; 1286 } 1287 1288 priv->eq_table.inta_pin = adapter.inta_pin; 1289 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 1290 1291 return 0; 1292 1293 err_close: 1294 mlx4_close_hca(dev); 1295 1296 err_free_icm: 1297 if (!mlx4_is_slave(dev)) 1298 mlx4_free_icms(dev); 1299 1300 err_stop_fw: 1301 if (!mlx4_is_slave(dev)) { 1302 mlx4_UNMAP_FA(dev); 1303 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1304 } 1305 unmap_bf: 1306 unmap_bf_area(dev); 1307 return err; 1308 } 1309 1310 static int mlx4_init_counters_table(struct mlx4_dev *dev) 1311 { 1312 struct mlx4_priv *priv = mlx4_priv(dev); 1313 int nent; 1314 1315 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1316 return -ENOENT; 1317 1318 nent = dev->caps.max_counters; 1319 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 1320 } 1321 1322 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 1323 { 1324 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 1325 } 1326 1327 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1328 { 1329 struct mlx4_priv *priv = mlx4_priv(dev); 1330 1331 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1332 return -ENOENT; 1333 1334 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 1335 if (*idx == -1) 1336 return -ENOMEM; 1337 1338 return 0; 1339 } 1340 1341 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1342 { 1343 u64 out_param; 1344 int err; 1345 1346 if (mlx4_is_mfunc(dev)) { 1347 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 1348 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 1349 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 1350 if (!err) 1351 *idx = get_param_l(&out_param); 1352 1353 return err; 1354 } 1355 return __mlx4_counter_alloc(dev, idx); 1356 } 1357 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 1358 1359 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1360 { 1361 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); 1362 return; 1363 } 1364 1365 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1366 { 1367 u64 in_param; 1368 1369 if (mlx4_is_mfunc(dev)) { 1370 set_param_l(&in_param, idx); 1371 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 1372 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 1373 MLX4_CMD_WRAPPED); 1374 return; 1375 } 1376 __mlx4_counter_free(dev, idx); 1377 } 1378 EXPORT_SYMBOL_GPL(mlx4_counter_free); 1379 1380 static int mlx4_setup_hca(struct mlx4_dev *dev) 1381 { 1382 struct mlx4_priv *priv = mlx4_priv(dev); 1383 int err; 1384 int port; 1385 __be32 ib_port_default_caps; 1386 1387 err = mlx4_init_uar_table(dev); 1388 if (err) { 1389 mlx4_err(dev, "Failed to initialize " 1390 "user access region table, aborting.\n"); 1391 return err; 1392 } 1393 1394 err = mlx4_uar_alloc(dev, &priv->driver_uar); 1395 if (err) { 1396 mlx4_err(dev, "Failed to allocate driver access region, " 1397 "aborting.\n"); 1398 goto err_uar_table_free; 1399 } 1400 1401 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1402 if (!priv->kar) { 1403 mlx4_err(dev, "Couldn't map kernel access region, " 1404 "aborting.\n"); 1405 err = -ENOMEM; 1406 goto err_uar_free; 1407 } 1408 1409 err = mlx4_init_pd_table(dev); 1410 if (err) { 1411 mlx4_err(dev, "Failed to initialize " 1412 "protection domain table, aborting.\n"); 1413 goto err_kar_unmap; 1414 } 1415 1416 err = mlx4_init_xrcd_table(dev); 1417 if (err) { 1418 mlx4_err(dev, "Failed to initialize " 1419 "reliable connection domain table, aborting.\n"); 1420 goto err_pd_table_free; 1421 } 1422 1423 err = mlx4_init_mr_table(dev); 1424 if (err) { 1425 mlx4_err(dev, "Failed to initialize " 1426 "memory region table, aborting.\n"); 1427 goto err_xrcd_table_free; 1428 } 1429 1430 err = mlx4_init_eq_table(dev); 1431 if (err) { 1432 mlx4_err(dev, "Failed to initialize " 1433 "event queue table, aborting.\n"); 1434 goto err_mr_table_free; 1435 } 1436 1437 err = mlx4_cmd_use_events(dev); 1438 if (err) { 1439 mlx4_err(dev, "Failed to switch to event-driven " 1440 "firmware commands, aborting.\n"); 1441 goto err_eq_table_free; 1442 } 1443 1444 err = mlx4_NOP(dev); 1445 if (err) { 1446 if (dev->flags & MLX4_FLAG_MSI_X) { 1447 mlx4_warn(dev, "NOP command failed to generate MSI-X " 1448 "interrupt IRQ %d).\n", 1449 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1450 mlx4_warn(dev, "Trying again without MSI-X.\n"); 1451 } else { 1452 mlx4_err(dev, "NOP command failed to generate interrupt " 1453 "(IRQ %d), aborting.\n", 1454 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1455 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 1456 } 1457 1458 goto err_cmd_poll; 1459 } 1460 1461 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 1462 1463 err = mlx4_init_cq_table(dev); 1464 if (err) { 1465 mlx4_err(dev, "Failed to initialize " 1466 "completion queue table, aborting.\n"); 1467 goto err_cmd_poll; 1468 } 1469 1470 err = mlx4_init_srq_table(dev); 1471 if (err) { 1472 mlx4_err(dev, "Failed to initialize " 1473 "shared receive queue table, aborting.\n"); 1474 goto err_cq_table_free; 1475 } 1476 1477 err = mlx4_init_qp_table(dev); 1478 if (err) { 1479 mlx4_err(dev, "Failed to initialize " 1480 "queue pair table, aborting.\n"); 1481 goto err_srq_table_free; 1482 } 1483 1484 if (!mlx4_is_slave(dev)) { 1485 err = mlx4_init_mcg_table(dev); 1486 if (err) { 1487 mlx4_err(dev, "Failed to initialize " 1488 "multicast group table, aborting.\n"); 1489 goto err_qp_table_free; 1490 } 1491 } 1492 1493 err = mlx4_init_counters_table(dev); 1494 if (err && err != -ENOENT) { 1495 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1496 goto err_mcg_table_free; 1497 } 1498 1499 if (!mlx4_is_slave(dev)) { 1500 for (port = 1; port <= dev->caps.num_ports; port++) { 1501 ib_port_default_caps = 0; 1502 err = mlx4_get_port_ib_caps(dev, port, 1503 &ib_port_default_caps); 1504 if (err) 1505 mlx4_warn(dev, "failed to get port %d default " 1506 "ib capabilities (%d). Continuing " 1507 "with caps = 0\n", port, err); 1508 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 1509 1510 /* initialize per-slave default ib port capabilities */ 1511 if (mlx4_is_master(dev)) { 1512 int i; 1513 for (i = 0; i < dev->num_slaves; i++) { 1514 if (i == mlx4_master_func_num(dev)) 1515 continue; 1516 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 1517 ib_port_default_caps; 1518 } 1519 } 1520 1521 if (mlx4_is_mfunc(dev)) 1522 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 1523 else 1524 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 1525 1526 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 1527 dev->caps.pkey_table_len[port] : -1); 1528 if (err) { 1529 mlx4_err(dev, "Failed to set port %d, aborting\n", 1530 port); 1531 goto err_counters_table_free; 1532 } 1533 } 1534 } 1535 1536 return 0; 1537 1538 err_counters_table_free: 1539 mlx4_cleanup_counters_table(dev); 1540 1541 err_mcg_table_free: 1542 mlx4_cleanup_mcg_table(dev); 1543 1544 err_qp_table_free: 1545 mlx4_cleanup_qp_table(dev); 1546 1547 err_srq_table_free: 1548 mlx4_cleanup_srq_table(dev); 1549 1550 err_cq_table_free: 1551 mlx4_cleanup_cq_table(dev); 1552 1553 err_cmd_poll: 1554 mlx4_cmd_use_polling(dev); 1555 1556 err_eq_table_free: 1557 mlx4_cleanup_eq_table(dev); 1558 1559 err_mr_table_free: 1560 mlx4_cleanup_mr_table(dev); 1561 1562 err_xrcd_table_free: 1563 mlx4_cleanup_xrcd_table(dev); 1564 1565 err_pd_table_free: 1566 mlx4_cleanup_pd_table(dev); 1567 1568 err_kar_unmap: 1569 iounmap(priv->kar); 1570 1571 err_uar_free: 1572 mlx4_uar_free(dev, &priv->driver_uar); 1573 1574 err_uar_table_free: 1575 mlx4_cleanup_uar_table(dev); 1576 return err; 1577 } 1578 1579 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 1580 { 1581 struct mlx4_priv *priv = mlx4_priv(dev); 1582 struct msix_entry *entries; 1583 int nreq = min_t(int, dev->caps.num_ports * 1584 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) 1585 + MSIX_LEGACY_SZ, MAX_MSIX); 1586 int err; 1587 int i; 1588 1589 if (msi_x) { 1590 /* In multifunction mode each function gets 2 msi-X vectors 1591 * one for data path completions anf the other for asynch events 1592 * or command completions */ 1593 if (mlx4_is_mfunc(dev)) { 1594 nreq = 2; 1595 } else { 1596 nreq = min_t(int, dev->caps.num_eqs - 1597 dev->caps.reserved_eqs, nreq); 1598 } 1599 1600 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1601 if (!entries) 1602 goto no_msi; 1603 1604 for (i = 0; i < nreq; ++i) 1605 entries[i].entry = i; 1606 1607 retry: 1608 err = pci_enable_msix(dev->pdev, entries, nreq); 1609 if (err) { 1610 /* Try again if at least 2 vectors are available */ 1611 if (err > 1) { 1612 mlx4_info(dev, "Requested %d vectors, " 1613 "but only %d MSI-X vectors available, " 1614 "trying again\n", nreq, err); 1615 nreq = err; 1616 goto retry; 1617 } 1618 kfree(entries); 1619 goto no_msi; 1620 } 1621 1622 if (nreq < 1623 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { 1624 /*Working in legacy mode , all EQ's shared*/ 1625 dev->caps.comp_pool = 0; 1626 dev->caps.num_comp_vectors = nreq - 1; 1627 } else { 1628 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 1629 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 1630 } 1631 for (i = 0; i < nreq; ++i) 1632 priv->eq_table.eq[i].irq = entries[i].vector; 1633 1634 dev->flags |= MLX4_FLAG_MSI_X; 1635 1636 kfree(entries); 1637 return; 1638 } 1639 1640 no_msi: 1641 dev->caps.num_comp_vectors = 1; 1642 dev->caps.comp_pool = 0; 1643 1644 for (i = 0; i < 2; ++i) 1645 priv->eq_table.eq[i].irq = dev->pdev->irq; 1646 } 1647 1648 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 1649 { 1650 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 1651 int err = 0; 1652 1653 info->dev = dev; 1654 info->port = port; 1655 if (!mlx4_is_slave(dev)) { 1656 INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL); 1657 mlx4_init_mac_table(dev, &info->mac_table); 1658 mlx4_init_vlan_table(dev, &info->vlan_table); 1659 info->base_qpn = 1660 dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + 1661 (port - 1) * (1 << log_num_mac); 1662 } 1663 1664 sprintf(info->dev_name, "mlx4_port%d", port); 1665 info->port_attr.attr.name = info->dev_name; 1666 if (mlx4_is_mfunc(dev)) 1667 info->port_attr.attr.mode = S_IRUGO; 1668 else { 1669 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1670 info->port_attr.store = set_port_type; 1671 } 1672 info->port_attr.show = show_port_type; 1673 sysfs_attr_init(&info->port_attr.attr); 1674 1675 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1676 if (err) { 1677 mlx4_err(dev, "Failed to create file for port %d\n", port); 1678 info->port = -1; 1679 } 1680 1681 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 1682 info->port_mtu_attr.attr.name = info->dev_mtu_name; 1683 if (mlx4_is_mfunc(dev)) 1684 info->port_mtu_attr.attr.mode = S_IRUGO; 1685 else { 1686 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 1687 info->port_mtu_attr.store = set_port_ib_mtu; 1688 } 1689 info->port_mtu_attr.show = show_port_ib_mtu; 1690 sysfs_attr_init(&info->port_mtu_attr.attr); 1691 1692 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); 1693 if (err) { 1694 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 1695 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1696 info->port = -1; 1697 } 1698 1699 return err; 1700 } 1701 1702 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 1703 { 1704 if (info->port < 0) 1705 return; 1706 1707 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1708 device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); 1709 } 1710 1711 static int mlx4_init_steering(struct mlx4_dev *dev) 1712 { 1713 struct mlx4_priv *priv = mlx4_priv(dev); 1714 int num_entries = dev->caps.num_ports; 1715 int i, j; 1716 1717 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 1718 if (!priv->steer) 1719 return -ENOMEM; 1720 1721 for (i = 0; i < num_entries; i++) 1722 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1723 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 1724 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 1725 } 1726 return 0; 1727 } 1728 1729 static void mlx4_clear_steering(struct mlx4_dev *dev) 1730 { 1731 struct mlx4_priv *priv = mlx4_priv(dev); 1732 struct mlx4_steer_index *entry, *tmp_entry; 1733 struct mlx4_promisc_qp *pqp, *tmp_pqp; 1734 int num_entries = dev->caps.num_ports; 1735 int i, j; 1736 1737 for (i = 0; i < num_entries; i++) { 1738 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1739 list_for_each_entry_safe(pqp, tmp_pqp, 1740 &priv->steer[i].promisc_qps[j], 1741 list) { 1742 list_del(&pqp->list); 1743 kfree(pqp); 1744 } 1745 list_for_each_entry_safe(entry, tmp_entry, 1746 &priv->steer[i].steer_entries[j], 1747 list) { 1748 list_del(&entry->list); 1749 list_for_each_entry_safe(pqp, tmp_pqp, 1750 &entry->duplicates, 1751 list) { 1752 list_del(&pqp->list); 1753 kfree(pqp); 1754 } 1755 kfree(entry); 1756 } 1757 } 1758 } 1759 kfree(priv->steer); 1760 } 1761 1762 static int extended_func_num(struct pci_dev *pdev) 1763 { 1764 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 1765 } 1766 1767 #define MLX4_OWNER_BASE 0x8069c 1768 #define MLX4_OWNER_SIZE 4 1769 1770 static int mlx4_get_ownership(struct mlx4_dev *dev) 1771 { 1772 void __iomem *owner; 1773 u32 ret; 1774 1775 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 1776 MLX4_OWNER_SIZE); 1777 if (!owner) { 1778 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1779 return -ENOMEM; 1780 } 1781 1782 ret = readl(owner); 1783 iounmap(owner); 1784 return (int) !!ret; 1785 } 1786 1787 static void mlx4_free_ownership(struct mlx4_dev *dev) 1788 { 1789 void __iomem *owner; 1790 1791 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 1792 MLX4_OWNER_SIZE); 1793 if (!owner) { 1794 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1795 return; 1796 } 1797 writel(0, owner); 1798 msleep(1000); 1799 iounmap(owner); 1800 } 1801 1802 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1803 { 1804 struct mlx4_priv *priv; 1805 struct mlx4_dev *dev; 1806 int err; 1807 int port; 1808 1809 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 1810 1811 err = pci_enable_device(pdev); 1812 if (err) { 1813 dev_err(&pdev->dev, "Cannot enable PCI device, " 1814 "aborting.\n"); 1815 return err; 1816 } 1817 if (num_vfs > MLX4_MAX_NUM_VF) { 1818 printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", 1819 num_vfs, MLX4_MAX_NUM_VF); 1820 return -EINVAL; 1821 } 1822 /* 1823 * Check for BARs. 1824 */ 1825 if (((id == NULL) || !(id->driver_data & MLX4_VF)) && 1826 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1827 dev_err(&pdev->dev, "Missing DCS, aborting." 1828 "(id == 0X%p, id->driver_data: 0x%lx," 1829 " pci_resource_flags(pdev, 0):0x%lx)\n", id, 1830 id ? id->driver_data : 0, pci_resource_flags(pdev, 0)); 1831 err = -ENODEV; 1832 goto err_disable_pdev; 1833 } 1834 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 1835 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 1836 err = -ENODEV; 1837 goto err_disable_pdev; 1838 } 1839 1840 err = pci_request_regions(pdev, DRV_NAME); 1841 if (err) { 1842 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 1843 goto err_disable_pdev; 1844 } 1845 1846 pci_set_master(pdev); 1847 1848 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1849 if (err) { 1850 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1851 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1852 if (err) { 1853 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1854 goto err_release_regions; 1855 } 1856 } 1857 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1858 if (err) { 1859 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1860 "consistent PCI DMA mask.\n"); 1861 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1862 if (err) { 1863 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1864 "aborting.\n"); 1865 goto err_release_regions; 1866 } 1867 } 1868 1869 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 1870 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 1871 1872 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1873 if (!priv) { 1874 dev_err(&pdev->dev, "Device struct alloc failed, " 1875 "aborting.\n"); 1876 err = -ENOMEM; 1877 goto err_release_regions; 1878 } 1879 1880 dev = &priv->dev; 1881 dev->pdev = pdev; 1882 INIT_LIST_HEAD(&priv->ctx_list); 1883 spin_lock_init(&priv->ctx_lock); 1884 1885 mutex_init(&priv->port_mutex); 1886 1887 INIT_LIST_HEAD(&priv->pgdir_list); 1888 mutex_init(&priv->pgdir_mutex); 1889 1890 INIT_LIST_HEAD(&priv->bf_list); 1891 mutex_init(&priv->bf_mutex); 1892 1893 dev->rev_id = pdev->revision; 1894 /* Detect if this device is a virtual function */ 1895 if (id && id->driver_data & MLX4_VF) { 1896 /* When acting as pf, we normally skip vfs unless explicitly 1897 * requested to probe them. */ 1898 if (num_vfs && extended_func_num(pdev) > probe_vf) { 1899 mlx4_warn(dev, "Skipping virtual function:%d\n", 1900 extended_func_num(pdev)); 1901 err = -ENODEV; 1902 goto err_free_dev; 1903 } 1904 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 1905 dev->flags |= MLX4_FLAG_SLAVE; 1906 } else { 1907 /* We reset the device and enable SRIOV only for physical 1908 * devices. Try to claim ownership on the device; 1909 * if already taken, skip -- do not allow multiple PFs */ 1910 err = mlx4_get_ownership(dev); 1911 if (err) { 1912 if (err < 0) 1913 goto err_free_dev; 1914 else { 1915 mlx4_warn(dev, "Multiple PFs not yet supported." 1916 " Skipping PF.\n"); 1917 err = -EINVAL; 1918 goto err_free_dev; 1919 } 1920 } 1921 1922 if (num_vfs) { 1923 mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs); 1924 err = pci_enable_sriov(pdev, num_vfs); 1925 if (err) { 1926 mlx4_err(dev, "Failed to enable sriov," 1927 "continuing without sriov enabled" 1928 " (err = %d).\n", err); 1929 err = 0; 1930 } else { 1931 mlx4_warn(dev, "Running in master mode\n"); 1932 dev->flags |= MLX4_FLAG_SRIOV | 1933 MLX4_FLAG_MASTER; 1934 dev->num_vfs = num_vfs; 1935 } 1936 } 1937 1938 /* 1939 * Now reset the HCA before we touch the PCI capabilities or 1940 * attempt a firmware command, since a boot ROM may have left 1941 * the HCA in an undefined state. 1942 */ 1943 err = mlx4_reset(dev); 1944 if (err) { 1945 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 1946 goto err_rel_own; 1947 } 1948 } 1949 1950 slave_start: 1951 if (mlx4_cmd_init(dev)) { 1952 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 1953 goto err_sriov; 1954 } 1955 1956 /* In slave functions, the communication channel must be initialized 1957 * before posting commands. Also, init num_slaves before calling 1958 * mlx4_init_hca */ 1959 if (mlx4_is_mfunc(dev)) { 1960 if (mlx4_is_master(dev)) 1961 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 1962 else { 1963 dev->num_slaves = 0; 1964 if (mlx4_multi_func_init(dev)) { 1965 mlx4_err(dev, "Failed to init slave mfunc" 1966 " interface, aborting.\n"); 1967 goto err_cmd; 1968 } 1969 } 1970 } 1971 1972 err = mlx4_init_hca(dev); 1973 if (err) { 1974 if (err == -EACCES) { 1975 /* Not primary Physical function 1976 * Running in slave mode */ 1977 mlx4_cmd_cleanup(dev); 1978 dev->flags |= MLX4_FLAG_SLAVE; 1979 dev->flags &= ~MLX4_FLAG_MASTER; 1980 goto slave_start; 1981 } else 1982 goto err_mfunc; 1983 } 1984 1985 /* In master functions, the communication channel must be initialized 1986 * after obtaining its address from fw */ 1987 if (mlx4_is_master(dev)) { 1988 if (mlx4_multi_func_init(dev)) { 1989 mlx4_err(dev, "Failed to init master mfunc" 1990 "interface, aborting.\n"); 1991 goto err_close; 1992 } 1993 } 1994 1995 err = mlx4_alloc_eq_table(dev); 1996 if (err) 1997 goto err_master_mfunc; 1998 1999 priv->msix_ctl.pool_bm = 0; 2000 mutex_init(&priv->msix_ctl.pool_lock); 2001 2002 mlx4_enable_msi_x(dev); 2003 if ((mlx4_is_mfunc(dev)) && 2004 !(dev->flags & MLX4_FLAG_MSI_X)) { 2005 mlx4_err(dev, "INTx is not supported in multi-function mode." 2006 " aborting.\n"); 2007 goto err_free_eq; 2008 } 2009 2010 if (!mlx4_is_slave(dev)) { 2011 err = mlx4_init_steering(dev); 2012 if (err) 2013 goto err_free_eq; 2014 } 2015 2016 err = mlx4_setup_hca(dev); 2017 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 2018 !mlx4_is_mfunc(dev)) { 2019 dev->flags &= ~MLX4_FLAG_MSI_X; 2020 dev->caps.num_comp_vectors = 1; 2021 dev->caps.comp_pool = 0; 2022 pci_disable_msix(pdev); 2023 err = mlx4_setup_hca(dev); 2024 } 2025 2026 if (err) 2027 goto err_steer; 2028 2029 for (port = 1; port <= dev->caps.num_ports; port++) { 2030 err = mlx4_init_port_info(dev, port); 2031 if (err) 2032 goto err_port; 2033 } 2034 2035 err = mlx4_register_device(dev); 2036 if (err) 2037 goto err_port; 2038 2039 mlx4_sense_init(dev); 2040 mlx4_start_sense(dev); 2041 2042 pci_set_drvdata(pdev, dev); 2043 2044 return 0; 2045 2046 err_port: 2047 for (--port; port >= 1; --port) 2048 mlx4_cleanup_port_info(&priv->port[port]); 2049 2050 mlx4_cleanup_counters_table(dev); 2051 mlx4_cleanup_mcg_table(dev); 2052 mlx4_cleanup_qp_table(dev); 2053 mlx4_cleanup_srq_table(dev); 2054 mlx4_cleanup_cq_table(dev); 2055 mlx4_cmd_use_polling(dev); 2056 mlx4_cleanup_eq_table(dev); 2057 mlx4_cleanup_mr_table(dev); 2058 mlx4_cleanup_xrcd_table(dev); 2059 mlx4_cleanup_pd_table(dev); 2060 mlx4_cleanup_uar_table(dev); 2061 2062 err_steer: 2063 if (!mlx4_is_slave(dev)) 2064 mlx4_clear_steering(dev); 2065 2066 err_free_eq: 2067 mlx4_free_eq_table(dev); 2068 2069 err_master_mfunc: 2070 if (mlx4_is_master(dev)) 2071 mlx4_multi_func_cleanup(dev); 2072 2073 err_close: 2074 if (dev->flags & MLX4_FLAG_MSI_X) 2075 pci_disable_msix(pdev); 2076 2077 mlx4_close_hca(dev); 2078 2079 err_mfunc: 2080 if (mlx4_is_slave(dev)) 2081 mlx4_multi_func_cleanup(dev); 2082 2083 err_cmd: 2084 mlx4_cmd_cleanup(dev); 2085 2086 err_sriov: 2087 if (dev->flags & MLX4_FLAG_SRIOV) 2088 pci_disable_sriov(pdev); 2089 2090 err_rel_own: 2091 if (!mlx4_is_slave(dev)) 2092 mlx4_free_ownership(dev); 2093 2094 err_free_dev: 2095 kfree(priv); 2096 2097 err_release_regions: 2098 pci_release_regions(pdev); 2099 2100 err_disable_pdev: 2101 pci_disable_device(pdev); 2102 pci_set_drvdata(pdev, NULL); 2103 return err; 2104 } 2105 2106 static int __devinit mlx4_init_one(struct pci_dev *pdev, 2107 const struct pci_device_id *id) 2108 { 2109 printk_once(KERN_INFO "%s", mlx4_version); 2110 2111 return __mlx4_init_one(pdev, id); 2112 } 2113 2114 static void mlx4_remove_one(struct pci_dev *pdev) 2115 { 2116 struct mlx4_dev *dev = pci_get_drvdata(pdev); 2117 struct mlx4_priv *priv = mlx4_priv(dev); 2118 int p; 2119 2120 if (dev) { 2121 /* in SRIOV it is not allowed to unload the pf's 2122 * driver while there are alive vf's */ 2123 if (mlx4_is_master(dev)) { 2124 if (mlx4_how_many_lives_vf(dev)) 2125 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); 2126 } 2127 mlx4_stop_sense(dev); 2128 mlx4_unregister_device(dev); 2129 2130 for (p = 1; p <= dev->caps.num_ports; p++) { 2131 mlx4_cleanup_port_info(&priv->port[p]); 2132 mlx4_CLOSE_PORT(dev, p); 2133 } 2134 2135 if (mlx4_is_master(dev)) 2136 mlx4_free_resource_tracker(dev, 2137 RES_TR_FREE_SLAVES_ONLY); 2138 2139 mlx4_cleanup_counters_table(dev); 2140 mlx4_cleanup_mcg_table(dev); 2141 mlx4_cleanup_qp_table(dev); 2142 mlx4_cleanup_srq_table(dev); 2143 mlx4_cleanup_cq_table(dev); 2144 mlx4_cmd_use_polling(dev); 2145 mlx4_cleanup_eq_table(dev); 2146 mlx4_cleanup_mr_table(dev); 2147 mlx4_cleanup_xrcd_table(dev); 2148 mlx4_cleanup_pd_table(dev); 2149 2150 if (mlx4_is_master(dev)) 2151 mlx4_free_resource_tracker(dev, 2152 RES_TR_FREE_STRUCTS_ONLY); 2153 2154 iounmap(priv->kar); 2155 mlx4_uar_free(dev, &priv->driver_uar); 2156 mlx4_cleanup_uar_table(dev); 2157 if (!mlx4_is_slave(dev)) 2158 mlx4_clear_steering(dev); 2159 mlx4_free_eq_table(dev); 2160 if (mlx4_is_master(dev)) 2161 mlx4_multi_func_cleanup(dev); 2162 mlx4_close_hca(dev); 2163 if (mlx4_is_slave(dev)) 2164 mlx4_multi_func_cleanup(dev); 2165 mlx4_cmd_cleanup(dev); 2166 2167 if (dev->flags & MLX4_FLAG_MSI_X) 2168 pci_disable_msix(pdev); 2169 if (dev->flags & MLX4_FLAG_SRIOV) { 2170 mlx4_warn(dev, "Disabling sriov\n"); 2171 pci_disable_sriov(pdev); 2172 } 2173 2174 if (!mlx4_is_slave(dev)) 2175 mlx4_free_ownership(dev); 2176 kfree(priv); 2177 pci_release_regions(pdev); 2178 pci_disable_device(pdev); 2179 pci_set_drvdata(pdev, NULL); 2180 } 2181 } 2182 2183 int mlx4_restart_one(struct pci_dev *pdev) 2184 { 2185 mlx4_remove_one(pdev); 2186 return __mlx4_init_one(pdev, NULL); 2187 } 2188 2189 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { 2190 /* MT25408 "Hermon" SDR */ 2191 { PCI_VDEVICE(MELLANOX, 0x6340), 0 }, 2192 /* MT25408 "Hermon" DDR */ 2193 { PCI_VDEVICE(MELLANOX, 0x634a), 0 }, 2194 /* MT25408 "Hermon" QDR */ 2195 { PCI_VDEVICE(MELLANOX, 0x6354), 0 }, 2196 /* MT25408 "Hermon" DDR PCIe gen2 */ 2197 { PCI_VDEVICE(MELLANOX, 0x6732), 0 }, 2198 /* MT25408 "Hermon" QDR PCIe gen2 */ 2199 { PCI_VDEVICE(MELLANOX, 0x673c), 0 }, 2200 /* MT25408 "Hermon" EN 10GigE */ 2201 { PCI_VDEVICE(MELLANOX, 0x6368), 0 }, 2202 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 2203 { PCI_VDEVICE(MELLANOX, 0x6750), 0 }, 2204 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 2205 { PCI_VDEVICE(MELLANOX, 0x6372), 0 }, 2206 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 2207 { PCI_VDEVICE(MELLANOX, 0x675a), 0 }, 2208 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 2209 { PCI_VDEVICE(MELLANOX, 0x6764), 0 }, 2210 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 2211 { PCI_VDEVICE(MELLANOX, 0x6746), 0 }, 2212 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 2213 { PCI_VDEVICE(MELLANOX, 0x676e), 0 }, 2214 /* MT25400 Family [ConnectX-2 Virtual Function] */ 2215 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF }, 2216 /* MT27500 Family [ConnectX-3] */ 2217 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 2218 /* MT27500 Family [ConnectX-3 Virtual Function] */ 2219 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF }, 2220 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 2221 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 2222 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 2223 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 2224 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 2225 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 2226 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 2227 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 2228 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 2229 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 2230 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 2231 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 2232 { 0, } 2233 }; 2234 2235 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 2236 2237 static struct pci_driver mlx4_driver = { 2238 .name = DRV_NAME, 2239 .id_table = mlx4_pci_table, 2240 .probe = mlx4_init_one, 2241 .remove = __devexit_p(mlx4_remove_one) 2242 }; 2243 2244 static int __init mlx4_verify_params(void) 2245 { 2246 if ((log_num_mac < 0) || (log_num_mac > 7)) { 2247 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); 2248 return -1; 2249 } 2250 2251 if (log_num_vlan != 0) 2252 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 2253 MLX4_LOG_NUM_VLANS); 2254 2255 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 2256 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 2257 return -1; 2258 } 2259 2260 /* Check if module param for ports type has legal combination */ 2261 if (port_type_array[0] == false && port_type_array[1] == true) { 2262 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 2263 port_type_array[0] = true; 2264 } 2265 2266 return 0; 2267 } 2268 2269 static int __init mlx4_init(void) 2270 { 2271 int ret; 2272 2273 if (mlx4_verify_params()) 2274 return -EINVAL; 2275 2276 mlx4_catas_init(); 2277 2278 mlx4_wq = create_singlethread_workqueue("mlx4"); 2279 if (!mlx4_wq) 2280 return -ENOMEM; 2281 2282 ret = pci_register_driver(&mlx4_driver); 2283 return ret < 0 ? ret : 0; 2284 } 2285 2286 static void __exit mlx4_cleanup(void) 2287 { 2288 pci_unregister_driver(&mlx4_driver); 2289 destroy_workqueue(mlx4_wq); 2290 } 2291 2292 module_init(mlx4_init); 2293 module_exit(mlx4_cleanup); 2294