1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 #include <linux/kmod.h> 45 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/doorbell.h> 48 49 #include "mlx4.h" 50 #include "fw.h" 51 #include "icm.h" 52 53 MODULE_AUTHOR("Roland Dreier"); 54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 55 MODULE_LICENSE("Dual BSD/GPL"); 56 MODULE_VERSION(DRV_VERSION); 57 58 struct workqueue_struct *mlx4_wq; 59 60 #ifdef CONFIG_MLX4_DEBUG 61 62 int mlx4_debug_level = 0; 63 module_param_named(debug_level, mlx4_debug_level, int, 0644); 64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 65 66 #endif /* CONFIG_MLX4_DEBUG */ 67 68 #ifdef CONFIG_PCI_MSI 69 70 static int msi_x = 1; 71 module_param(msi_x, int, 0444); 72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 73 74 #else /* CONFIG_PCI_MSI */ 75 76 #define msi_x (0) 77 78 #endif /* CONFIG_PCI_MSI */ 79 80 static uint8_t num_vfs[3] = {0, 0, 0}; 81 static int num_vfs_argc; 82 module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 84 "num_vfs=port1,port2,port1+2"); 85 86 static uint8_t probe_vf[3] = {0, 0, 0}; 87 static int probe_vfs_argc; 88 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 89 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 90 "probe_vf=port1,port2,port1+2"); 91 92 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 93 module_param_named(log_num_mgm_entry_size, 94 mlx4_log_num_mgm_entry_size, int, 0444); 95 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 96 " of qp per mcg, for example:" 97 " 10 gives 248.range: 7 <=" 98 " log_num_mgm_entry_size <= 12." 99 " To activate device managed" 100 " flow steering when available, set to -1"); 101 102 static bool enable_64b_cqe_eqe = true; 103 module_param(enable_64b_cqe_eqe, bool, 0444); 104 MODULE_PARM_DESC(enable_64b_cqe_eqe, 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 106 107 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 109 MLX4_FUNC_CAP_DMFS_A0_STATIC) 110 111 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) 112 113 static char mlx4_version[] = 114 DRV_NAME ": Mellanox ConnectX core driver v" 115 DRV_VERSION " (" DRV_RELDATE ")\n"; 116 117 static struct mlx4_profile default_profile = { 118 .num_qp = 1 << 18, 119 .num_srq = 1 << 16, 120 .rdmarc_per_qp = 1 << 4, 121 .num_cq = 1 << 16, 122 .num_mcg = 1 << 13, 123 .num_mpt = 1 << 19, 124 .num_mtt = 1 << 20, /* It is really num mtt segements */ 125 }; 126 127 static struct mlx4_profile low_mem_profile = { 128 .num_qp = 1 << 17, 129 .num_srq = 1 << 6, 130 .rdmarc_per_qp = 1 << 4, 131 .num_cq = 1 << 8, 132 .num_mcg = 1 << 8, 133 .num_mpt = 1 << 9, 134 .num_mtt = 1 << 7, 135 }; 136 137 static int log_num_mac = 7; 138 module_param_named(log_num_mac, log_num_mac, int, 0444); 139 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 140 141 static int log_num_vlan; 142 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 143 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 144 /* Log2 max number of VLANs per ETH port (0-7) */ 145 #define MLX4_LOG_NUM_VLANS 7 146 #define MLX4_MIN_LOG_NUM_VLANS 0 147 #define MLX4_MIN_LOG_NUM_MAC 1 148 149 static bool use_prio; 150 module_param_named(use_prio, use_prio, bool, 0444); 151 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); 152 153 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 154 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 155 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 156 157 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 158 static int arr_argc = 2; 159 module_param_array(port_type_array, int, &arr_argc, 0444); 160 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 161 "1 for IB, 2 for Ethernet"); 162 163 struct mlx4_port_config { 164 struct list_head list; 165 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 166 struct pci_dev *pdev; 167 }; 168 169 static atomic_t pf_loading = ATOMIC_INIT(0); 170 171 int mlx4_check_port_params(struct mlx4_dev *dev, 172 enum mlx4_port_type *port_type) 173 { 174 int i; 175 176 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 177 for (i = 0; i < dev->caps.num_ports - 1; i++) { 178 if (port_type[i] != port_type[i + 1]) { 179 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 180 return -EINVAL; 181 } 182 } 183 } 184 185 for (i = 0; i < dev->caps.num_ports; i++) { 186 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 187 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 188 i + 1); 189 return -EINVAL; 190 } 191 } 192 return 0; 193 } 194 195 static void mlx4_set_port_mask(struct mlx4_dev *dev) 196 { 197 int i; 198 199 for (i = 1; i <= dev->caps.num_ports; ++i) 200 dev->caps.port_mask[i] = dev->caps.port_type[i]; 201 } 202 203 enum { 204 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 205 }; 206 207 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 208 { 209 int err = 0; 210 struct mlx4_func func; 211 212 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 213 err = mlx4_QUERY_FUNC(dev, &func, 0); 214 if (err) { 215 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 216 return err; 217 } 218 dev_cap->max_eqs = func.max_eq; 219 dev_cap->reserved_eqs = func.rsvd_eqs; 220 dev_cap->reserved_uars = func.rsvd_uars; 221 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 222 } 223 return err; 224 } 225 226 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 227 { 228 struct mlx4_caps *dev_cap = &dev->caps; 229 230 /* FW not supporting or cancelled by user */ 231 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || 232 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) 233 return; 234 235 /* Must have 64B CQE_EQE enabled by FW to use bigger stride 236 * When FW has NCSI it may decide not to report 64B CQE/EQEs 237 */ 238 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || 239 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { 240 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 241 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 242 return; 243 } 244 245 if (cache_line_size() == 128 || cache_line_size() == 256) { 246 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); 247 /* Changing the real data inside CQE size to 32B */ 248 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 249 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 250 251 if (mlx4_is_master(dev)) 252 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 253 } else { 254 if (cache_line_size() != 32 && cache_line_size() != 64) 255 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); 256 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 257 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 258 } 259 } 260 261 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, 262 struct mlx4_port_cap *port_cap) 263 { 264 dev->caps.vl_cap[port] = port_cap->max_vl; 265 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; 266 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; 267 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; 268 /* set gid and pkey table operating lengths by default 269 * to non-sriov values 270 */ 271 dev->caps.gid_table_len[port] = port_cap->max_gids; 272 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 273 dev->caps.port_width_cap[port] = port_cap->max_port_width; 274 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 275 dev->caps.def_mac[port] = port_cap->def_mac; 276 dev->caps.supported_type[port] = port_cap->supported_port_types; 277 dev->caps.suggested_type[port] = port_cap->suggested_type; 278 dev->caps.default_sense[port] = port_cap->default_sense; 279 dev->caps.trans_type[port] = port_cap->trans_type; 280 dev->caps.vendor_oui[port] = port_cap->vendor_oui; 281 dev->caps.wavelength[port] = port_cap->wavelength; 282 dev->caps.trans_code[port] = port_cap->trans_code; 283 284 return 0; 285 } 286 287 static int mlx4_dev_port(struct mlx4_dev *dev, int port, 288 struct mlx4_port_cap *port_cap) 289 { 290 int err = 0; 291 292 err = mlx4_QUERY_PORT(dev, port, port_cap); 293 294 if (err) 295 mlx4_err(dev, "QUERY_PORT command failed.\n"); 296 297 return err; 298 } 299 300 #define MLX4_A0_STEERING_TABLE_SIZE 256 301 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 302 { 303 int err; 304 int i; 305 306 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 307 if (err) { 308 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 309 return err; 310 } 311 mlx4_dev_cap_dump(dev, dev_cap); 312 313 if (dev_cap->min_page_sz > PAGE_SIZE) { 314 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 315 dev_cap->min_page_sz, PAGE_SIZE); 316 return -ENODEV; 317 } 318 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 319 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 320 dev_cap->num_ports, MLX4_MAX_PORTS); 321 return -ENODEV; 322 } 323 324 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { 325 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 326 dev_cap->uar_size, 327 (unsigned long long) 328 pci_resource_len(dev->persist->pdev, 2)); 329 return -ENODEV; 330 } 331 332 dev->caps.num_ports = dev_cap->num_ports; 333 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 334 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 335 dev->caps.num_sys_eqs : 336 MLX4_MAX_EQ_NUM; 337 for (i = 1; i <= dev->caps.num_ports; ++i) { 338 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); 339 if (err) { 340 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); 341 return err; 342 } 343 } 344 345 dev->caps.uar_page_size = PAGE_SIZE; 346 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 347 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 348 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 349 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 350 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 351 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 352 dev->caps.max_wqes = dev_cap->max_qp_sz; 353 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 354 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 355 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 356 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 357 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 358 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 359 /* 360 * Subtract 1 from the limit because we need to allocate a 361 * spare CQE so the HCA HW can tell the difference between an 362 * empty CQ and a full CQ. 363 */ 364 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 365 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 366 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 367 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 368 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 369 370 /* The first 128 UARs are used for EQ doorbells */ 371 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 372 dev->caps.reserved_pds = dev_cap->reserved_pds; 373 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 374 dev_cap->reserved_xrcds : 0; 375 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 376 dev_cap->max_xrcds : 0; 377 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 378 379 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 380 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 381 dev->caps.flags = dev_cap->flags; 382 dev->caps.flags2 = dev_cap->flags2; 383 dev->caps.bmme_flags = dev_cap->bmme_flags; 384 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 385 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 386 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 387 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 388 389 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 390 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 391 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 392 /* Don't do sense port on multifunction devices (for now at least) */ 393 if (mlx4_is_mfunc(dev)) 394 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 395 396 if (mlx4_low_memory_profile()) { 397 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 398 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 399 } else { 400 dev->caps.log_num_macs = log_num_mac; 401 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 402 } 403 404 for (i = 1; i <= dev->caps.num_ports; ++i) { 405 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 406 if (dev->caps.supported_type[i]) { 407 /* if only ETH is supported - assign ETH */ 408 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 409 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 410 /* if only IB is supported, assign IB */ 411 else if (dev->caps.supported_type[i] == 412 MLX4_PORT_TYPE_IB) 413 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 414 else { 415 /* if IB and ETH are supported, we set the port 416 * type according to user selection of port type; 417 * if user selected none, take the FW hint */ 418 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 419 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 420 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 421 else 422 dev->caps.port_type[i] = port_type_array[i - 1]; 423 } 424 } 425 /* 426 * Link sensing is allowed on the port if 3 conditions are true: 427 * 1. Both protocols are supported on the port. 428 * 2. Different types are supported on the port 429 * 3. FW declared that it supports link sensing 430 */ 431 mlx4_priv(dev)->sense.sense_allowed[i] = 432 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 433 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 434 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 435 436 /* 437 * If "default_sense" bit is set, we move the port to "AUTO" mode 438 * and perform sense_port FW command to try and set the correct 439 * port type from beginning 440 */ 441 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 442 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 443 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 444 mlx4_SENSE_PORT(dev, i, &sensed_port); 445 if (sensed_port != MLX4_PORT_TYPE_NONE) 446 dev->caps.port_type[i] = sensed_port; 447 } else { 448 dev->caps.possible_type[i] = dev->caps.port_type[i]; 449 } 450 451 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 452 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 453 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 454 i, 1 << dev->caps.log_num_macs); 455 } 456 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 457 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 458 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 459 i, 1 << dev->caps.log_num_vlans); 460 } 461 } 462 463 dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); 464 465 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 466 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 467 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 468 (1 << dev->caps.log_num_macs) * 469 (1 << dev->caps.log_num_vlans) * 470 dev->caps.num_ports; 471 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 472 473 if (dev_cap->dmfs_high_rate_qpn_base > 0 && 474 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) 475 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; 476 else 477 dev->caps.dmfs_high_rate_qpn_base = 478 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 479 480 if (dev_cap->dmfs_high_rate_qpn_range > 0 && 481 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 482 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; 483 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; 484 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; 485 } else { 486 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; 487 dev->caps.dmfs_high_rate_qpn_base = 488 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 489 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; 490 } 491 492 dev->caps.rl_caps = dev_cap->rl_caps; 493 494 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = 495 dev->caps.dmfs_high_rate_qpn_range; 496 497 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 498 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 499 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 500 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 501 502 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; 503 504 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { 505 if (dev_cap->flags & 506 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 507 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 508 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 509 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 510 } 511 512 if (dev_cap->flags2 & 513 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | 514 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { 515 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); 516 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 517 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 518 } 519 } 520 521 if ((dev->caps.flags & 522 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 523 mlx4_is_master(dev)) 524 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 525 526 if (!mlx4_is_slave(dev)) { 527 mlx4_enable_cqe_eqe_stride(dev); 528 dev->caps.alloc_res_qp_mask = 529 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 530 MLX4_RESERVE_A0_QP; 531 } else { 532 dev->caps.alloc_res_qp_mask = 0; 533 } 534 535 return 0; 536 } 537 538 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, 539 enum pci_bus_speed *speed, 540 enum pcie_link_width *width) 541 { 542 u32 lnkcap1, lnkcap2; 543 int err1, err2; 544 545 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 546 547 *speed = PCI_SPEED_UNKNOWN; 548 *width = PCIE_LNK_WIDTH_UNKNOWN; 549 550 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, 551 &lnkcap1); 552 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, 553 &lnkcap2); 554 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 555 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 556 *speed = PCIE_SPEED_8_0GT; 557 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 558 *speed = PCIE_SPEED_5_0GT; 559 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 560 *speed = PCIE_SPEED_2_5GT; 561 } 562 if (!err1) { 563 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 564 if (!lnkcap2) { /* pre-r3.0 */ 565 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 566 *speed = PCIE_SPEED_5_0GT; 567 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 568 *speed = PCIE_SPEED_2_5GT; 569 } 570 } 571 572 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { 573 return err1 ? err1 : 574 err2 ? err2 : -EINVAL; 575 } 576 return 0; 577 } 578 579 static void mlx4_check_pcie_caps(struct mlx4_dev *dev) 580 { 581 enum pcie_link_width width, width_cap; 582 enum pci_bus_speed speed, speed_cap; 583 int err; 584 585 #define PCIE_SPEED_STR(speed) \ 586 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 587 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 588 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ 589 "Unknown") 590 591 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 592 if (err) { 593 mlx4_warn(dev, 594 "Unable to determine PCIe device BW capabilities\n"); 595 return; 596 } 597 598 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 599 if (err || speed == PCI_SPEED_UNKNOWN || 600 width == PCIE_LNK_WIDTH_UNKNOWN) { 601 mlx4_warn(dev, 602 "Unable to determine PCI device chain minimum BW\n"); 603 return; 604 } 605 606 if (width != width_cap || speed != speed_cap) 607 mlx4_warn(dev, 608 "PCIe BW is different than device's capability\n"); 609 610 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 611 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 612 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 613 width, width_cap); 614 return; 615 } 616 617 /*The function checks if there are live vf, return the num of them*/ 618 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 619 { 620 struct mlx4_priv *priv = mlx4_priv(dev); 621 struct mlx4_slave_state *s_state; 622 int i; 623 int ret = 0; 624 625 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 626 s_state = &priv->mfunc.master.slave_state[i]; 627 if (s_state->active && s_state->last_cmd != 628 MLX4_COMM_CMD_RESET) { 629 mlx4_warn(dev, "%s: slave: %d is still active\n", 630 __func__, i); 631 ret++; 632 } 633 } 634 return ret; 635 } 636 637 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 638 { 639 u32 qk = MLX4_RESERVED_QKEY_BASE; 640 641 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 642 qpn < dev->phys_caps.base_proxy_sqpn) 643 return -EINVAL; 644 645 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 646 /* tunnel qp */ 647 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 648 else 649 qk += qpn - dev->phys_caps.base_proxy_sqpn; 650 *qkey = qk; 651 return 0; 652 } 653 EXPORT_SYMBOL(mlx4_get_parav_qkey); 654 655 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 656 { 657 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 658 659 if (!mlx4_is_master(dev)) 660 return; 661 662 priv->virt2phys_pkey[slave][port - 1][i] = val; 663 } 664 EXPORT_SYMBOL(mlx4_sync_pkey_table); 665 666 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 667 { 668 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 669 670 if (!mlx4_is_master(dev)) 671 return; 672 673 priv->slave_node_guids[slave] = guid; 674 } 675 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 676 677 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 678 { 679 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 680 681 if (!mlx4_is_master(dev)) 682 return 0; 683 684 return priv->slave_node_guids[slave]; 685 } 686 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 687 688 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 689 { 690 struct mlx4_priv *priv = mlx4_priv(dev); 691 struct mlx4_slave_state *s_slave; 692 693 if (!mlx4_is_master(dev)) 694 return 0; 695 696 s_slave = &priv->mfunc.master.slave_state[slave]; 697 return !!s_slave->active; 698 } 699 EXPORT_SYMBOL(mlx4_is_slave_active); 700 701 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 702 struct mlx4_dev_cap *dev_cap, 703 struct mlx4_init_hca_param *hca_param) 704 { 705 dev->caps.steering_mode = hca_param->steering_mode; 706 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 707 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 708 dev->caps.fs_log_max_ucast_qp_range_size = 709 dev_cap->fs_log_max_ucast_qp_range_size; 710 } else 711 dev->caps.num_qp_per_mgm = 712 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 713 714 mlx4_dbg(dev, "Steering mode is: %s\n", 715 mlx4_steering_mode_str(dev->caps.steering_mode)); 716 } 717 718 static int mlx4_slave_cap(struct mlx4_dev *dev) 719 { 720 int err; 721 u32 page_size; 722 struct mlx4_dev_cap dev_cap; 723 struct mlx4_func_cap func_cap; 724 struct mlx4_init_hca_param hca_param; 725 u8 i; 726 727 memset(&hca_param, 0, sizeof(hca_param)); 728 err = mlx4_QUERY_HCA(dev, &hca_param); 729 if (err) { 730 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); 731 return err; 732 } 733 734 /* fail if the hca has an unknown global capability 735 * at this time global_caps should be always zeroed 736 */ 737 if (hca_param.global_caps) { 738 mlx4_err(dev, "Unknown hca global capabilities\n"); 739 return -ENOSYS; 740 } 741 742 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 743 744 dev->caps.hca_core_clock = hca_param.hca_core_clock; 745 746 memset(&dev_cap, 0, sizeof(dev_cap)); 747 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 748 err = mlx4_dev_cap(dev, &dev_cap); 749 if (err) { 750 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 751 return err; 752 } 753 754 err = mlx4_QUERY_FW(dev); 755 if (err) 756 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); 757 758 page_size = ~dev->caps.page_size_cap + 1; 759 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 760 if (page_size > PAGE_SIZE) { 761 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 762 page_size, PAGE_SIZE); 763 return -ENODEV; 764 } 765 766 /* slave gets uar page size from QUERY_HCA fw command */ 767 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 768 769 /* TODO: relax this assumption */ 770 if (dev->caps.uar_page_size != PAGE_SIZE) { 771 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 772 dev->caps.uar_page_size, PAGE_SIZE); 773 return -ENODEV; 774 } 775 776 memset(&func_cap, 0, sizeof(func_cap)); 777 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 778 if (err) { 779 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", 780 err); 781 return err; 782 } 783 784 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 785 PF_CONTEXT_BEHAVIOUR_MASK) { 786 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", 787 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); 788 return -ENOSYS; 789 } 790 791 dev->caps.num_ports = func_cap.num_ports; 792 dev->quotas.qp = func_cap.qp_quota; 793 dev->quotas.srq = func_cap.srq_quota; 794 dev->quotas.cq = func_cap.cq_quota; 795 dev->quotas.mpt = func_cap.mpt_quota; 796 dev->quotas.mtt = func_cap.mtt_quota; 797 dev->caps.num_qps = 1 << hca_param.log_num_qps; 798 dev->caps.num_srqs = 1 << hca_param.log_num_srqs; 799 dev->caps.num_cqs = 1 << hca_param.log_num_cqs; 800 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 801 dev->caps.num_eqs = func_cap.max_eq; 802 dev->caps.reserved_eqs = func_cap.reserved_eq; 803 dev->caps.reserved_lkey = func_cap.reserved_lkey; 804 dev->caps.num_pds = MLX4_NUM_PDS; 805 dev->caps.num_mgms = 0; 806 dev->caps.num_amgms = 0; 807 808 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 809 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 810 dev->caps.num_ports, MLX4_MAX_PORTS); 811 return -ENODEV; 812 } 813 814 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); 815 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 816 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 817 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 818 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 819 820 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 821 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || 822 !dev->caps.qp0_qkey) { 823 err = -ENOMEM; 824 goto err_mem; 825 } 826 827 for (i = 1; i <= dev->caps.num_ports; ++i) { 828 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 829 if (err) { 830 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 831 i, err); 832 goto err_mem; 833 } 834 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 835 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 836 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 837 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 838 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 839 dev->caps.port_mask[i] = dev->caps.port_type[i]; 840 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 841 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 842 &dev->caps.gid_table_len[i], 843 &dev->caps.pkey_table_len[i])) 844 goto err_mem; 845 } 846 847 if (dev->caps.uar_page_size * (dev->caps.num_uars - 848 dev->caps.reserved_uars) > 849 pci_resource_len(dev->persist->pdev, 850 2)) { 851 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 852 dev->caps.uar_page_size * dev->caps.num_uars, 853 (unsigned long long) 854 pci_resource_len(dev->persist->pdev, 2)); 855 goto err_mem; 856 } 857 858 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 859 dev->caps.eqe_size = 64; 860 dev->caps.eqe_factor = 1; 861 } else { 862 dev->caps.eqe_size = 32; 863 dev->caps.eqe_factor = 0; 864 } 865 866 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 867 dev->caps.cqe_size = 64; 868 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 869 } else { 870 dev->caps.cqe_size = 32; 871 } 872 873 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 874 dev->caps.eqe_size = hca_param.eqe_size; 875 dev->caps.eqe_factor = 0; 876 } 877 878 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 879 dev->caps.cqe_size = hca_param.cqe_size; 880 /* User still need to know when CQE > 32B */ 881 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 882 } 883 884 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 885 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 886 887 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 888 mlx4_dbg(dev, "RSS support for IP fragments is %s\n", 889 hca_param.rss_ip_frags ? "on" : "off"); 890 891 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 892 dev->caps.bf_reg_size) 893 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 894 895 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 896 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 897 898 return 0; 899 900 err_mem: 901 kfree(dev->caps.qp0_qkey); 902 kfree(dev->caps.qp0_tunnel); 903 kfree(dev->caps.qp0_proxy); 904 kfree(dev->caps.qp1_tunnel); 905 kfree(dev->caps.qp1_proxy); 906 dev->caps.qp0_qkey = NULL; 907 dev->caps.qp0_tunnel = NULL; 908 dev->caps.qp0_proxy = NULL; 909 dev->caps.qp1_tunnel = NULL; 910 dev->caps.qp1_proxy = NULL; 911 912 return err; 913 } 914 915 static void mlx4_request_modules(struct mlx4_dev *dev) 916 { 917 int port; 918 int has_ib_port = false; 919 int has_eth_port = false; 920 #define EN_DRV_NAME "mlx4_en" 921 #define IB_DRV_NAME "mlx4_ib" 922 923 for (port = 1; port <= dev->caps.num_ports; port++) { 924 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 925 has_ib_port = true; 926 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 927 has_eth_port = true; 928 } 929 930 if (has_eth_port) 931 request_module_nowait(EN_DRV_NAME); 932 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 933 request_module_nowait(IB_DRV_NAME); 934 } 935 936 /* 937 * Change the port configuration of the device. 938 * Every user of this function must hold the port mutex. 939 */ 940 int mlx4_change_port_types(struct mlx4_dev *dev, 941 enum mlx4_port_type *port_types) 942 { 943 int err = 0; 944 int change = 0; 945 int port; 946 947 for (port = 0; port < dev->caps.num_ports; port++) { 948 /* Change the port type only if the new type is different 949 * from the current, and not set to Auto */ 950 if (port_types[port] != dev->caps.port_type[port + 1]) 951 change = 1; 952 } 953 if (change) { 954 mlx4_unregister_device(dev); 955 for (port = 1; port <= dev->caps.num_ports; port++) { 956 mlx4_CLOSE_PORT(dev, port); 957 dev->caps.port_type[port] = port_types[port - 1]; 958 err = mlx4_SET_PORT(dev, port, -1); 959 if (err) { 960 mlx4_err(dev, "Failed to set port %d, aborting\n", 961 port); 962 goto out; 963 } 964 } 965 mlx4_set_port_mask(dev); 966 err = mlx4_register_device(dev); 967 if (err) { 968 mlx4_err(dev, "Failed to register device\n"); 969 goto out; 970 } 971 mlx4_request_modules(dev); 972 } 973 974 out: 975 return err; 976 } 977 978 static ssize_t show_port_type(struct device *dev, 979 struct device_attribute *attr, 980 char *buf) 981 { 982 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 983 port_attr); 984 struct mlx4_dev *mdev = info->dev; 985 char type[8]; 986 987 sprintf(type, "%s", 988 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 989 "ib" : "eth"); 990 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 991 sprintf(buf, "auto (%s)\n", type); 992 else 993 sprintf(buf, "%s\n", type); 994 995 return strlen(buf); 996 } 997 998 static ssize_t set_port_type(struct device *dev, 999 struct device_attribute *attr, 1000 const char *buf, size_t count) 1001 { 1002 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1003 port_attr); 1004 struct mlx4_dev *mdev = info->dev; 1005 struct mlx4_priv *priv = mlx4_priv(mdev); 1006 enum mlx4_port_type types[MLX4_MAX_PORTS]; 1007 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 1008 static DEFINE_MUTEX(set_port_type_mutex); 1009 int i; 1010 int err = 0; 1011 1012 mutex_lock(&set_port_type_mutex); 1013 1014 if (!strcmp(buf, "ib\n")) 1015 info->tmp_type = MLX4_PORT_TYPE_IB; 1016 else if (!strcmp(buf, "eth\n")) 1017 info->tmp_type = MLX4_PORT_TYPE_ETH; 1018 else if (!strcmp(buf, "auto\n")) 1019 info->tmp_type = MLX4_PORT_TYPE_AUTO; 1020 else { 1021 mlx4_err(mdev, "%s is not supported port type\n", buf); 1022 err = -EINVAL; 1023 goto err_out; 1024 } 1025 1026 mlx4_stop_sense(mdev); 1027 mutex_lock(&priv->port_mutex); 1028 /* Possible type is always the one that was delivered */ 1029 mdev->caps.possible_type[info->port] = info->tmp_type; 1030 1031 for (i = 0; i < mdev->caps.num_ports; i++) { 1032 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 1033 mdev->caps.possible_type[i+1]; 1034 if (types[i] == MLX4_PORT_TYPE_AUTO) 1035 types[i] = mdev->caps.port_type[i+1]; 1036 } 1037 1038 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 1039 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 1040 for (i = 1; i <= mdev->caps.num_ports; i++) { 1041 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 1042 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 1043 err = -EINVAL; 1044 } 1045 } 1046 } 1047 if (err) { 1048 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); 1049 goto out; 1050 } 1051 1052 mlx4_do_sense_ports(mdev, new_types, types); 1053 1054 err = mlx4_check_port_params(mdev, new_types); 1055 if (err) 1056 goto out; 1057 1058 /* We are about to apply the changes after the configuration 1059 * was verified, no need to remember the temporary types 1060 * any more */ 1061 for (i = 0; i < mdev->caps.num_ports; i++) 1062 priv->port[i + 1].tmp_type = 0; 1063 1064 err = mlx4_change_port_types(mdev, new_types); 1065 1066 out: 1067 mlx4_start_sense(mdev); 1068 mutex_unlock(&priv->port_mutex); 1069 err_out: 1070 mutex_unlock(&set_port_type_mutex); 1071 1072 return err ? err : count; 1073 } 1074 1075 enum ibta_mtu { 1076 IB_MTU_256 = 1, 1077 IB_MTU_512 = 2, 1078 IB_MTU_1024 = 3, 1079 IB_MTU_2048 = 4, 1080 IB_MTU_4096 = 5 1081 }; 1082 1083 static inline int int_to_ibta_mtu(int mtu) 1084 { 1085 switch (mtu) { 1086 case 256: return IB_MTU_256; 1087 case 512: return IB_MTU_512; 1088 case 1024: return IB_MTU_1024; 1089 case 2048: return IB_MTU_2048; 1090 case 4096: return IB_MTU_4096; 1091 default: return -1; 1092 } 1093 } 1094 1095 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 1096 { 1097 switch (mtu) { 1098 case IB_MTU_256: return 256; 1099 case IB_MTU_512: return 512; 1100 case IB_MTU_1024: return 1024; 1101 case IB_MTU_2048: return 2048; 1102 case IB_MTU_4096: return 4096; 1103 default: return -1; 1104 } 1105 } 1106 1107 static ssize_t show_port_ib_mtu(struct device *dev, 1108 struct device_attribute *attr, 1109 char *buf) 1110 { 1111 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1112 port_mtu_attr); 1113 struct mlx4_dev *mdev = info->dev; 1114 1115 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 1116 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1117 1118 sprintf(buf, "%d\n", 1119 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 1120 return strlen(buf); 1121 } 1122 1123 static ssize_t set_port_ib_mtu(struct device *dev, 1124 struct device_attribute *attr, 1125 const char *buf, size_t count) 1126 { 1127 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1128 port_mtu_attr); 1129 struct mlx4_dev *mdev = info->dev; 1130 struct mlx4_priv *priv = mlx4_priv(mdev); 1131 int err, port, mtu, ibta_mtu = -1; 1132 1133 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 1134 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1135 return -EINVAL; 1136 } 1137 1138 err = kstrtoint(buf, 0, &mtu); 1139 if (!err) 1140 ibta_mtu = int_to_ibta_mtu(mtu); 1141 1142 if (err || ibta_mtu < 0) { 1143 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 1144 return -EINVAL; 1145 } 1146 1147 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 1148 1149 mlx4_stop_sense(mdev); 1150 mutex_lock(&priv->port_mutex); 1151 mlx4_unregister_device(mdev); 1152 for (port = 1; port <= mdev->caps.num_ports; port++) { 1153 mlx4_CLOSE_PORT(mdev, port); 1154 err = mlx4_SET_PORT(mdev, port, -1); 1155 if (err) { 1156 mlx4_err(mdev, "Failed to set port %d, aborting\n", 1157 port); 1158 goto err_set_port; 1159 } 1160 } 1161 err = mlx4_register_device(mdev); 1162 err_set_port: 1163 mutex_unlock(&priv->port_mutex); 1164 mlx4_start_sense(mdev); 1165 return err ? err : count; 1166 } 1167 1168 int mlx4_bond(struct mlx4_dev *dev) 1169 { 1170 int ret = 0; 1171 struct mlx4_priv *priv = mlx4_priv(dev); 1172 1173 mutex_lock(&priv->bond_mutex); 1174 1175 if (!mlx4_is_bonded(dev)) 1176 ret = mlx4_do_bond(dev, true); 1177 else 1178 ret = 0; 1179 1180 mutex_unlock(&priv->bond_mutex); 1181 if (ret) 1182 mlx4_err(dev, "Failed to bond device: %d\n", ret); 1183 else 1184 mlx4_dbg(dev, "Device is bonded\n"); 1185 return ret; 1186 } 1187 EXPORT_SYMBOL_GPL(mlx4_bond); 1188 1189 int mlx4_unbond(struct mlx4_dev *dev) 1190 { 1191 int ret = 0; 1192 struct mlx4_priv *priv = mlx4_priv(dev); 1193 1194 mutex_lock(&priv->bond_mutex); 1195 1196 if (mlx4_is_bonded(dev)) 1197 ret = mlx4_do_bond(dev, false); 1198 1199 mutex_unlock(&priv->bond_mutex); 1200 if (ret) 1201 mlx4_err(dev, "Failed to unbond device: %d\n", ret); 1202 else 1203 mlx4_dbg(dev, "Device is unbonded\n"); 1204 return ret; 1205 } 1206 EXPORT_SYMBOL_GPL(mlx4_unbond); 1207 1208 1209 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) 1210 { 1211 u8 port1 = v2p->port1; 1212 u8 port2 = v2p->port2; 1213 struct mlx4_priv *priv = mlx4_priv(dev); 1214 int err; 1215 1216 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) 1217 return -ENOTSUPP; 1218 1219 mutex_lock(&priv->bond_mutex); 1220 1221 /* zero means keep current mapping for this port */ 1222 if (port1 == 0) 1223 port1 = priv->v2p.port1; 1224 if (port2 == 0) 1225 port2 = priv->v2p.port2; 1226 1227 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || 1228 (port2 < 1) || (port2 > MLX4_MAX_PORTS) || 1229 (port1 == 2 && port2 == 1)) { 1230 /* besides boundary checks cross mapping makes 1231 * no sense and therefore not allowed */ 1232 err = -EINVAL; 1233 } else if ((port1 == priv->v2p.port1) && 1234 (port2 == priv->v2p.port2)) { 1235 err = 0; 1236 } else { 1237 err = mlx4_virt2phy_port_map(dev, port1, port2); 1238 if (!err) { 1239 mlx4_dbg(dev, "port map changed: [%d][%d]\n", 1240 port1, port2); 1241 priv->v2p.port1 = port1; 1242 priv->v2p.port2 = port2; 1243 } else { 1244 mlx4_err(dev, "Failed to change port mape: %d\n", err); 1245 } 1246 } 1247 1248 mutex_unlock(&priv->bond_mutex); 1249 return err; 1250 } 1251 EXPORT_SYMBOL_GPL(mlx4_port_map_set); 1252 1253 static int mlx4_load_fw(struct mlx4_dev *dev) 1254 { 1255 struct mlx4_priv *priv = mlx4_priv(dev); 1256 int err; 1257 1258 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 1259 GFP_HIGHUSER | __GFP_NOWARN, 0); 1260 if (!priv->fw.fw_icm) { 1261 mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); 1262 return -ENOMEM; 1263 } 1264 1265 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 1266 if (err) { 1267 mlx4_err(dev, "MAP_FA command failed, aborting\n"); 1268 goto err_free; 1269 } 1270 1271 err = mlx4_RUN_FW(dev); 1272 if (err) { 1273 mlx4_err(dev, "RUN_FW command failed, aborting\n"); 1274 goto err_unmap_fa; 1275 } 1276 1277 return 0; 1278 1279 err_unmap_fa: 1280 mlx4_UNMAP_FA(dev); 1281 1282 err_free: 1283 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1284 return err; 1285 } 1286 1287 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 1288 int cmpt_entry_sz) 1289 { 1290 struct mlx4_priv *priv = mlx4_priv(dev); 1291 int err; 1292 int num_eqs; 1293 1294 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 1295 cmpt_base + 1296 ((u64) (MLX4_CMPT_TYPE_QP * 1297 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1298 cmpt_entry_sz, dev->caps.num_qps, 1299 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1300 0, 0); 1301 if (err) 1302 goto err; 1303 1304 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 1305 cmpt_base + 1306 ((u64) (MLX4_CMPT_TYPE_SRQ * 1307 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1308 cmpt_entry_sz, dev->caps.num_srqs, 1309 dev->caps.reserved_srqs, 0, 0); 1310 if (err) 1311 goto err_qp; 1312 1313 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 1314 cmpt_base + 1315 ((u64) (MLX4_CMPT_TYPE_CQ * 1316 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1317 cmpt_entry_sz, dev->caps.num_cqs, 1318 dev->caps.reserved_cqs, 0, 0); 1319 if (err) 1320 goto err_srq; 1321 1322 num_eqs = dev->phys_caps.num_phys_eqs; 1323 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 1324 cmpt_base + 1325 ((u64) (MLX4_CMPT_TYPE_EQ * 1326 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1327 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 1328 if (err) 1329 goto err_cq; 1330 1331 return 0; 1332 1333 err_cq: 1334 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1335 1336 err_srq: 1337 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1338 1339 err_qp: 1340 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1341 1342 err: 1343 return err; 1344 } 1345 1346 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 1347 struct mlx4_init_hca_param *init_hca, u64 icm_size) 1348 { 1349 struct mlx4_priv *priv = mlx4_priv(dev); 1350 u64 aux_pages; 1351 int num_eqs; 1352 int err; 1353 1354 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1355 if (err) { 1356 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); 1357 return err; 1358 } 1359 1360 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", 1361 (unsigned long long) icm_size >> 10, 1362 (unsigned long long) aux_pages << 2); 1363 1364 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1365 GFP_HIGHUSER | __GFP_NOWARN, 0); 1366 if (!priv->fw.aux_icm) { 1367 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); 1368 return -ENOMEM; 1369 } 1370 1371 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1372 if (err) { 1373 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); 1374 goto err_free_aux; 1375 } 1376 1377 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1378 if (err) { 1379 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); 1380 goto err_unmap_aux; 1381 } 1382 1383 1384 num_eqs = dev->phys_caps.num_phys_eqs; 1385 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 1386 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1387 num_eqs, num_eqs, 0, 0); 1388 if (err) { 1389 mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); 1390 goto err_unmap_cmpt; 1391 } 1392 1393 /* 1394 * Reserved MTT entries must be aligned up to a cacheline 1395 * boundary, since the FW will write to them, while the driver 1396 * writes to all other MTT entries. (The variable 1397 * dev->caps.mtt_entry_sz below is really the MTT segment 1398 * size, not the raw entry size) 1399 */ 1400 dev->caps.reserved_mtts = 1401 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 1402 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 1403 1404 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 1405 init_hca->mtt_base, 1406 dev->caps.mtt_entry_sz, 1407 dev->caps.num_mtts, 1408 dev->caps.reserved_mtts, 1, 0); 1409 if (err) { 1410 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); 1411 goto err_unmap_eq; 1412 } 1413 1414 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 1415 init_hca->dmpt_base, 1416 dev_cap->dmpt_entry_sz, 1417 dev->caps.num_mpts, 1418 dev->caps.reserved_mrws, 1, 1); 1419 if (err) { 1420 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); 1421 goto err_unmap_mtt; 1422 } 1423 1424 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1425 init_hca->qpc_base, 1426 dev_cap->qpc_entry_sz, 1427 dev->caps.num_qps, 1428 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1429 0, 0); 1430 if (err) { 1431 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); 1432 goto err_unmap_dmpt; 1433 } 1434 1435 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1436 init_hca->auxc_base, 1437 dev_cap->aux_entry_sz, 1438 dev->caps.num_qps, 1439 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1440 0, 0); 1441 if (err) { 1442 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); 1443 goto err_unmap_qp; 1444 } 1445 1446 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1447 init_hca->altc_base, 1448 dev_cap->altc_entry_sz, 1449 dev->caps.num_qps, 1450 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1451 0, 0); 1452 if (err) { 1453 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); 1454 goto err_unmap_auxc; 1455 } 1456 1457 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1458 init_hca->rdmarc_base, 1459 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1460 dev->caps.num_qps, 1461 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1462 0, 0); 1463 if (err) { 1464 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1465 goto err_unmap_altc; 1466 } 1467 1468 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1469 init_hca->cqc_base, 1470 dev_cap->cqc_entry_sz, 1471 dev->caps.num_cqs, 1472 dev->caps.reserved_cqs, 0, 0); 1473 if (err) { 1474 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); 1475 goto err_unmap_rdmarc; 1476 } 1477 1478 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1479 init_hca->srqc_base, 1480 dev_cap->srq_entry_sz, 1481 dev->caps.num_srqs, 1482 dev->caps.reserved_srqs, 0, 0); 1483 if (err) { 1484 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); 1485 goto err_unmap_cq; 1486 } 1487 1488 /* 1489 * For flow steering device managed mode it is required to use 1490 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1491 * required, but for simplicity just map the whole multicast 1492 * group table now. The table isn't very big and it's a lot 1493 * easier than trying to track ref counts. 1494 */ 1495 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1496 init_hca->mc_base, 1497 mlx4_get_mgm_entry_size(dev), 1498 dev->caps.num_mgms + dev->caps.num_amgms, 1499 dev->caps.num_mgms + dev->caps.num_amgms, 1500 0, 0); 1501 if (err) { 1502 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); 1503 goto err_unmap_srq; 1504 } 1505 1506 return 0; 1507 1508 err_unmap_srq: 1509 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1510 1511 err_unmap_cq: 1512 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1513 1514 err_unmap_rdmarc: 1515 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1516 1517 err_unmap_altc: 1518 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1519 1520 err_unmap_auxc: 1521 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1522 1523 err_unmap_qp: 1524 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1525 1526 err_unmap_dmpt: 1527 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1528 1529 err_unmap_mtt: 1530 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1531 1532 err_unmap_eq: 1533 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1534 1535 err_unmap_cmpt: 1536 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1537 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1538 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1539 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1540 1541 err_unmap_aux: 1542 mlx4_UNMAP_ICM_AUX(dev); 1543 1544 err_free_aux: 1545 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1546 1547 return err; 1548 } 1549 1550 static void mlx4_free_icms(struct mlx4_dev *dev) 1551 { 1552 struct mlx4_priv *priv = mlx4_priv(dev); 1553 1554 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1555 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1556 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1557 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1558 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1559 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1560 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1561 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1562 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1563 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1564 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1565 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1566 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1567 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1568 1569 mlx4_UNMAP_ICM_AUX(dev); 1570 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1571 } 1572 1573 static void mlx4_slave_exit(struct mlx4_dev *dev) 1574 { 1575 struct mlx4_priv *priv = mlx4_priv(dev); 1576 1577 mutex_lock(&priv->cmd.slave_cmd_mutex); 1578 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 1579 MLX4_COMM_TIME)) 1580 mlx4_warn(dev, "Failed to close slave function\n"); 1581 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1582 } 1583 1584 static int map_bf_area(struct mlx4_dev *dev) 1585 { 1586 struct mlx4_priv *priv = mlx4_priv(dev); 1587 resource_size_t bf_start; 1588 resource_size_t bf_len; 1589 int err = 0; 1590 1591 if (!dev->caps.bf_reg_size) 1592 return -ENXIO; 1593 1594 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1595 (dev->caps.num_uars << PAGE_SHIFT); 1596 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1597 (dev->caps.num_uars << PAGE_SHIFT); 1598 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1599 if (!priv->bf_mapping) 1600 err = -ENOMEM; 1601 1602 return err; 1603 } 1604 1605 static void unmap_bf_area(struct mlx4_dev *dev) 1606 { 1607 if (mlx4_priv(dev)->bf_mapping) 1608 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1609 } 1610 1611 cycle_t mlx4_read_clock(struct mlx4_dev *dev) 1612 { 1613 u32 clockhi, clocklo, clockhi1; 1614 cycle_t cycles; 1615 int i; 1616 struct mlx4_priv *priv = mlx4_priv(dev); 1617 1618 for (i = 0; i < 10; i++) { 1619 clockhi = swab32(readl(priv->clock_mapping)); 1620 clocklo = swab32(readl(priv->clock_mapping + 4)); 1621 clockhi1 = swab32(readl(priv->clock_mapping)); 1622 if (clockhi == clockhi1) 1623 break; 1624 } 1625 1626 cycles = (u64) clockhi << 32 | (u64) clocklo; 1627 1628 return cycles; 1629 } 1630 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1631 1632 1633 static int map_internal_clock(struct mlx4_dev *dev) 1634 { 1635 struct mlx4_priv *priv = mlx4_priv(dev); 1636 1637 priv->clock_mapping = 1638 ioremap(pci_resource_start(dev->persist->pdev, 1639 priv->fw.clock_bar) + 1640 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1641 1642 if (!priv->clock_mapping) 1643 return -ENOMEM; 1644 1645 return 0; 1646 } 1647 1648 static void unmap_internal_clock(struct mlx4_dev *dev) 1649 { 1650 struct mlx4_priv *priv = mlx4_priv(dev); 1651 1652 if (priv->clock_mapping) 1653 iounmap(priv->clock_mapping); 1654 } 1655 1656 static void mlx4_close_hca(struct mlx4_dev *dev) 1657 { 1658 unmap_internal_clock(dev); 1659 unmap_bf_area(dev); 1660 if (mlx4_is_slave(dev)) 1661 mlx4_slave_exit(dev); 1662 else { 1663 mlx4_CLOSE_HCA(dev, 0); 1664 mlx4_free_icms(dev); 1665 } 1666 } 1667 1668 static void mlx4_close_fw(struct mlx4_dev *dev) 1669 { 1670 if (!mlx4_is_slave(dev)) { 1671 mlx4_UNMAP_FA(dev); 1672 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1673 } 1674 } 1675 1676 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1677 { 1678 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1679 1680 u32 comm_flags; 1681 u32 offline_bit; 1682 unsigned long end; 1683 struct mlx4_priv *priv = mlx4_priv(dev); 1684 1685 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1686 while (time_before(jiffies, end)) { 1687 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1688 MLX4_COMM_CHAN_FLAGS)); 1689 offline_bit = (comm_flags & 1690 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1691 if (!offline_bit) 1692 return 0; 1693 /* There are cases as part of AER/Reset flow that PF needs 1694 * around 100 msec to load. We therefore sleep for 100 msec 1695 * to allow other tasks to make use of that CPU during this 1696 * time interval. 1697 */ 1698 msleep(100); 1699 } 1700 mlx4_err(dev, "Communication channel is offline.\n"); 1701 return -EIO; 1702 } 1703 1704 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1705 { 1706 #define COMM_CHAN_RST_OFFSET 0x1e 1707 1708 struct mlx4_priv *priv = mlx4_priv(dev); 1709 u32 comm_rst; 1710 u32 comm_caps; 1711 1712 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1713 MLX4_COMM_CHAN_CAPS)); 1714 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1715 1716 if (comm_rst) 1717 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1718 } 1719 1720 static int mlx4_init_slave(struct mlx4_dev *dev) 1721 { 1722 struct mlx4_priv *priv = mlx4_priv(dev); 1723 u64 dma = (u64) priv->mfunc.vhcr_dma; 1724 int ret_from_reset = 0; 1725 u32 slave_read; 1726 u32 cmd_channel_ver; 1727 1728 if (atomic_read(&pf_loading)) { 1729 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1730 return -EPROBE_DEFER; 1731 } 1732 1733 mutex_lock(&priv->cmd.slave_cmd_mutex); 1734 priv->cmd.max_cmds = 1; 1735 if (mlx4_comm_check_offline(dev)) { 1736 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1737 goto err_offline; 1738 } 1739 1740 mlx4_reset_vf_support(dev); 1741 mlx4_warn(dev, "Sending reset\n"); 1742 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1743 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1744 /* if we are in the middle of flr the slave will try 1745 * NUM_OF_RESET_RETRIES times before leaving.*/ 1746 if (ret_from_reset) { 1747 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1748 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1749 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1750 return -EPROBE_DEFER; 1751 } else 1752 goto err; 1753 } 1754 1755 /* check the driver version - the slave I/F revision 1756 * must match the master's */ 1757 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1758 cmd_channel_ver = mlx4_comm_get_version(); 1759 1760 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1761 MLX4_COMM_GET_IF_REV(slave_read)) { 1762 mlx4_err(dev, "slave driver version is not supported by the master\n"); 1763 goto err; 1764 } 1765 1766 mlx4_warn(dev, "Sending vhcr0\n"); 1767 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1768 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1769 goto err; 1770 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1771 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1772 goto err; 1773 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1774 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1775 goto err; 1776 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 1777 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1778 goto err; 1779 1780 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1781 return 0; 1782 1783 err: 1784 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 1785 err_offline: 1786 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1787 return -EIO; 1788 } 1789 1790 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 1791 { 1792 int i; 1793 1794 for (i = 1; i <= dev->caps.num_ports; i++) { 1795 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 1796 dev->caps.gid_table_len[i] = 1797 mlx4_get_slave_num_gids(dev, 0, i); 1798 else 1799 dev->caps.gid_table_len[i] = 1; 1800 dev->caps.pkey_table_len[i] = 1801 dev->phys_caps.pkey_phys_table_len[i] - 1; 1802 } 1803 } 1804 1805 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 1806 { 1807 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 1808 1809 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 1810 i++) { 1811 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 1812 break; 1813 } 1814 1815 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 1816 } 1817 1818 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 1819 { 1820 switch (dmfs_high_steer_mode) { 1821 case MLX4_STEERING_DMFS_A0_DEFAULT: 1822 return "default performance"; 1823 1824 case MLX4_STEERING_DMFS_A0_DYNAMIC: 1825 return "dynamic hybrid mode"; 1826 1827 case MLX4_STEERING_DMFS_A0_STATIC: 1828 return "performance optimized for limited rule configuration (static)"; 1829 1830 case MLX4_STEERING_DMFS_A0_DISABLE: 1831 return "disabled performance optimized steering"; 1832 1833 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 1834 return "performance optimized steering not supported"; 1835 1836 default: 1837 return "Unrecognized mode"; 1838 } 1839 } 1840 1841 #define MLX4_DMFS_A0_STEERING (1UL << 2) 1842 1843 static void choose_steering_mode(struct mlx4_dev *dev, 1844 struct mlx4_dev_cap *dev_cap) 1845 { 1846 if (mlx4_log_num_mgm_entry_size <= 0) { 1847 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 1848 if (dev->caps.dmfs_high_steer_mode == 1849 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1850 mlx4_err(dev, "DMFS high rate mode not supported\n"); 1851 else 1852 dev->caps.dmfs_high_steer_mode = 1853 MLX4_STEERING_DMFS_A0_STATIC; 1854 } 1855 } 1856 1857 if (mlx4_log_num_mgm_entry_size <= 0 && 1858 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1859 (!mlx4_is_mfunc(dev) || 1860 (dev_cap->fs_max_num_qp_per_entry >= 1861 (dev->persist->num_vfs + 1))) && 1862 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1863 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1864 dev->oper_log_mgm_entry_size = 1865 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 1866 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 1867 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 1868 dev->caps.fs_log_max_ucast_qp_range_size = 1869 dev_cap->fs_log_max_ucast_qp_range_size; 1870 } else { 1871 if (dev->caps.dmfs_high_steer_mode != 1872 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1873 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 1874 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 1875 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1876 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 1877 else { 1878 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 1879 1880 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1881 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1882 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 1883 } 1884 dev->oper_log_mgm_entry_size = 1885 mlx4_log_num_mgm_entry_size > 0 ? 1886 mlx4_log_num_mgm_entry_size : 1887 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1888 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1889 } 1890 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 1891 mlx4_steering_mode_str(dev->caps.steering_mode), 1892 dev->oper_log_mgm_entry_size, 1893 mlx4_log_num_mgm_entry_size); 1894 } 1895 1896 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 1897 struct mlx4_dev_cap *dev_cap) 1898 { 1899 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 1900 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 1901 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 1902 else 1903 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 1904 1905 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 1906 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 1907 } 1908 1909 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 1910 { 1911 int i; 1912 struct mlx4_port_cap port_cap; 1913 1914 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1915 return -EINVAL; 1916 1917 for (i = 1; i <= dev->caps.num_ports; i++) { 1918 if (mlx4_dev_port(dev, i, &port_cap)) { 1919 mlx4_err(dev, 1920 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 1921 } else if ((dev->caps.dmfs_high_steer_mode != 1922 MLX4_STEERING_DMFS_A0_DEFAULT) && 1923 (port_cap.dmfs_optimized_state == 1924 !!(dev->caps.dmfs_high_steer_mode == 1925 MLX4_STEERING_DMFS_A0_DISABLE))) { 1926 mlx4_err(dev, 1927 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 1928 dmfs_high_rate_steering_mode_str( 1929 dev->caps.dmfs_high_steer_mode), 1930 (port_cap.dmfs_optimized_state ? 1931 "enabled" : "disabled")); 1932 } 1933 } 1934 1935 return 0; 1936 } 1937 1938 static int mlx4_init_fw(struct mlx4_dev *dev) 1939 { 1940 struct mlx4_mod_stat_cfg mlx4_cfg; 1941 int err = 0; 1942 1943 if (!mlx4_is_slave(dev)) { 1944 err = mlx4_QUERY_FW(dev); 1945 if (err) { 1946 if (err == -EACCES) 1947 mlx4_info(dev, "non-primary physical function, skipping\n"); 1948 else 1949 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 1950 return err; 1951 } 1952 1953 err = mlx4_load_fw(dev); 1954 if (err) { 1955 mlx4_err(dev, "Failed to start FW, aborting\n"); 1956 return err; 1957 } 1958 1959 mlx4_cfg.log_pg_sz_m = 1; 1960 mlx4_cfg.log_pg_sz = 0; 1961 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1962 if (err) 1963 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1964 } 1965 1966 return err; 1967 } 1968 1969 static int mlx4_init_hca(struct mlx4_dev *dev) 1970 { 1971 struct mlx4_priv *priv = mlx4_priv(dev); 1972 struct mlx4_adapter adapter; 1973 struct mlx4_dev_cap dev_cap; 1974 struct mlx4_profile profile; 1975 struct mlx4_init_hca_param init_hca; 1976 u64 icm_size; 1977 struct mlx4_config_dev_params params; 1978 int err; 1979 1980 if (!mlx4_is_slave(dev)) { 1981 err = mlx4_dev_cap(dev, &dev_cap); 1982 if (err) { 1983 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 1984 return err; 1985 } 1986 1987 choose_steering_mode(dev, &dev_cap); 1988 choose_tunnel_offload_mode(dev, &dev_cap); 1989 1990 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 1991 mlx4_is_master(dev)) 1992 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 1993 1994 err = mlx4_get_phys_port_id(dev); 1995 if (err) 1996 mlx4_err(dev, "Fail to get physical port id\n"); 1997 1998 if (mlx4_is_master(dev)) 1999 mlx4_parav_master_pf_caps(dev); 2000 2001 if (mlx4_low_memory_profile()) { 2002 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n"); 2003 profile = low_mem_profile; 2004 } else { 2005 profile = default_profile; 2006 } 2007 if (dev->caps.steering_mode == 2008 MLX4_STEERING_MODE_DEVICE_MANAGED) 2009 profile.num_mcg = MLX4_FS_NUM_MCG; 2010 2011 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2012 &init_hca); 2013 if ((long long) icm_size < 0) { 2014 err = icm_size; 2015 return err; 2016 } 2017 2018 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2019 2020 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2021 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2022 init_hca.mw_enabled = 0; 2023 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2024 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2025 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2026 2027 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2028 if (err) 2029 return err; 2030 2031 err = mlx4_INIT_HCA(dev, &init_hca); 2032 if (err) { 2033 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2034 goto err_free_icm; 2035 } 2036 2037 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2038 err = mlx4_query_func(dev, &dev_cap); 2039 if (err < 0) { 2040 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2041 goto err_close; 2042 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2043 dev->caps.num_eqs = dev_cap.max_eqs; 2044 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2045 dev->caps.reserved_uars = dev_cap.reserved_uars; 2046 } 2047 } 2048 2049 /* 2050 * If TS is supported by FW 2051 * read HCA frequency by QUERY_HCA command 2052 */ 2053 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2054 memset(&init_hca, 0, sizeof(init_hca)); 2055 err = mlx4_QUERY_HCA(dev, &init_hca); 2056 if (err) { 2057 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2058 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2059 } else { 2060 dev->caps.hca_core_clock = 2061 init_hca.hca_core_clock; 2062 } 2063 2064 /* In case we got HCA frequency 0 - disable timestamping 2065 * to avoid dividing by zero 2066 */ 2067 if (!dev->caps.hca_core_clock) { 2068 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2069 mlx4_err(dev, 2070 "HCA frequency is 0 - timestamping is not supported\n"); 2071 } else if (map_internal_clock(dev)) { 2072 /* 2073 * Map internal clock, 2074 * in case of failure disable timestamping 2075 */ 2076 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2077 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); 2078 } 2079 } 2080 2081 if (dev->caps.dmfs_high_steer_mode != 2082 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2083 if (mlx4_validate_optimized_steering(dev)) 2084 mlx4_warn(dev, "Optimized steering validation failed\n"); 2085 2086 if (dev->caps.dmfs_high_steer_mode == 2087 MLX4_STEERING_DMFS_A0_DISABLE) { 2088 dev->caps.dmfs_high_rate_qpn_base = 2089 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2090 dev->caps.dmfs_high_rate_qpn_range = 2091 MLX4_A0_STEERING_TABLE_SIZE; 2092 } 2093 2094 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2095 dmfs_high_rate_steering_mode_str( 2096 dev->caps.dmfs_high_steer_mode)); 2097 } 2098 } else { 2099 err = mlx4_init_slave(dev); 2100 if (err) { 2101 if (err != -EPROBE_DEFER) 2102 mlx4_err(dev, "Failed to initialize slave\n"); 2103 return err; 2104 } 2105 2106 err = mlx4_slave_cap(dev); 2107 if (err) { 2108 mlx4_err(dev, "Failed to obtain slave caps\n"); 2109 goto err_close; 2110 } 2111 } 2112 2113 if (map_bf_area(dev)) 2114 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2115 2116 /*Only the master set the ports, all the rest got it from it.*/ 2117 if (!mlx4_is_slave(dev)) 2118 mlx4_set_port_mask(dev); 2119 2120 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2121 if (err) { 2122 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2123 goto unmap_bf; 2124 } 2125 2126 /* Query CONFIG_DEV parameters */ 2127 err = mlx4_config_dev_retrieval(dev, ¶ms); 2128 if (err && err != -ENOTSUPP) { 2129 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2130 } else if (!err) { 2131 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2132 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2133 } 2134 priv->eq_table.inta_pin = adapter.inta_pin; 2135 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2136 2137 return 0; 2138 2139 unmap_bf: 2140 unmap_internal_clock(dev); 2141 unmap_bf_area(dev); 2142 2143 if (mlx4_is_slave(dev)) { 2144 kfree(dev->caps.qp0_qkey); 2145 kfree(dev->caps.qp0_tunnel); 2146 kfree(dev->caps.qp0_proxy); 2147 kfree(dev->caps.qp1_tunnel); 2148 kfree(dev->caps.qp1_proxy); 2149 } 2150 2151 err_close: 2152 if (mlx4_is_slave(dev)) 2153 mlx4_slave_exit(dev); 2154 else 2155 mlx4_CLOSE_HCA(dev, 0); 2156 2157 err_free_icm: 2158 if (!mlx4_is_slave(dev)) 2159 mlx4_free_icms(dev); 2160 2161 return err; 2162 } 2163 2164 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2165 { 2166 struct mlx4_priv *priv = mlx4_priv(dev); 2167 int nent; 2168 2169 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2170 return -ENOENT; 2171 2172 nent = dev->caps.max_counters; 2173 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 2174 } 2175 2176 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2177 { 2178 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2179 } 2180 2181 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2182 { 2183 struct mlx4_priv *priv = mlx4_priv(dev); 2184 2185 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2186 return -ENOENT; 2187 2188 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2189 if (*idx == -1) 2190 return -ENOMEM; 2191 2192 return 0; 2193 } 2194 2195 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2196 { 2197 u64 out_param; 2198 int err; 2199 2200 if (mlx4_is_mfunc(dev)) { 2201 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2202 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2203 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2204 if (!err) 2205 *idx = get_param_l(&out_param); 2206 2207 return err; 2208 } 2209 return __mlx4_counter_alloc(dev, idx); 2210 } 2211 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2212 2213 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2214 { 2215 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2216 return; 2217 } 2218 2219 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2220 { 2221 u64 in_param = 0; 2222 2223 if (mlx4_is_mfunc(dev)) { 2224 set_param_l(&in_param, idx); 2225 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2226 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2227 MLX4_CMD_WRAPPED); 2228 return; 2229 } 2230 __mlx4_counter_free(dev, idx); 2231 } 2232 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2233 2234 static int mlx4_setup_hca(struct mlx4_dev *dev) 2235 { 2236 struct mlx4_priv *priv = mlx4_priv(dev); 2237 int err; 2238 int port; 2239 __be32 ib_port_default_caps; 2240 2241 err = mlx4_init_uar_table(dev); 2242 if (err) { 2243 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2244 return err; 2245 } 2246 2247 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2248 if (err) { 2249 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2250 goto err_uar_table_free; 2251 } 2252 2253 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2254 if (!priv->kar) { 2255 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2256 err = -ENOMEM; 2257 goto err_uar_free; 2258 } 2259 2260 err = mlx4_init_pd_table(dev); 2261 if (err) { 2262 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2263 goto err_kar_unmap; 2264 } 2265 2266 err = mlx4_init_xrcd_table(dev); 2267 if (err) { 2268 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2269 goto err_pd_table_free; 2270 } 2271 2272 err = mlx4_init_mr_table(dev); 2273 if (err) { 2274 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2275 goto err_xrcd_table_free; 2276 } 2277 2278 if (!mlx4_is_slave(dev)) { 2279 err = mlx4_init_mcg_table(dev); 2280 if (err) { 2281 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2282 goto err_mr_table_free; 2283 } 2284 err = mlx4_config_mad_demux(dev); 2285 if (err) { 2286 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2287 goto err_mcg_table_free; 2288 } 2289 } 2290 2291 err = mlx4_init_eq_table(dev); 2292 if (err) { 2293 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2294 goto err_mcg_table_free; 2295 } 2296 2297 err = mlx4_cmd_use_events(dev); 2298 if (err) { 2299 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2300 goto err_eq_table_free; 2301 } 2302 2303 err = mlx4_NOP(dev); 2304 if (err) { 2305 if (dev->flags & MLX4_FLAG_MSI_X) { 2306 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2307 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2308 mlx4_warn(dev, "Trying again without MSI-X\n"); 2309 } else { 2310 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2311 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2312 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2313 } 2314 2315 goto err_cmd_poll; 2316 } 2317 2318 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2319 2320 err = mlx4_init_cq_table(dev); 2321 if (err) { 2322 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2323 goto err_cmd_poll; 2324 } 2325 2326 err = mlx4_init_srq_table(dev); 2327 if (err) { 2328 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2329 goto err_cq_table_free; 2330 } 2331 2332 err = mlx4_init_qp_table(dev); 2333 if (err) { 2334 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2335 goto err_srq_table_free; 2336 } 2337 2338 err = mlx4_init_counters_table(dev); 2339 if (err && err != -ENOENT) { 2340 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2341 goto err_qp_table_free; 2342 } 2343 2344 if (!mlx4_is_slave(dev)) { 2345 for (port = 1; port <= dev->caps.num_ports; port++) { 2346 ib_port_default_caps = 0; 2347 err = mlx4_get_port_ib_caps(dev, port, 2348 &ib_port_default_caps); 2349 if (err) 2350 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2351 port, err); 2352 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2353 2354 /* initialize per-slave default ib port capabilities */ 2355 if (mlx4_is_master(dev)) { 2356 int i; 2357 for (i = 0; i < dev->num_slaves; i++) { 2358 if (i == mlx4_master_func_num(dev)) 2359 continue; 2360 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2361 ib_port_default_caps; 2362 } 2363 } 2364 2365 if (mlx4_is_mfunc(dev)) 2366 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2367 else 2368 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2369 2370 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2371 dev->caps.pkey_table_len[port] : -1); 2372 if (err) { 2373 mlx4_err(dev, "Failed to set port %d, aborting\n", 2374 port); 2375 goto err_counters_table_free; 2376 } 2377 } 2378 } 2379 2380 return 0; 2381 2382 err_counters_table_free: 2383 mlx4_cleanup_counters_table(dev); 2384 2385 err_qp_table_free: 2386 mlx4_cleanup_qp_table(dev); 2387 2388 err_srq_table_free: 2389 mlx4_cleanup_srq_table(dev); 2390 2391 err_cq_table_free: 2392 mlx4_cleanup_cq_table(dev); 2393 2394 err_cmd_poll: 2395 mlx4_cmd_use_polling(dev); 2396 2397 err_eq_table_free: 2398 mlx4_cleanup_eq_table(dev); 2399 2400 err_mcg_table_free: 2401 if (!mlx4_is_slave(dev)) 2402 mlx4_cleanup_mcg_table(dev); 2403 2404 err_mr_table_free: 2405 mlx4_cleanup_mr_table(dev); 2406 2407 err_xrcd_table_free: 2408 mlx4_cleanup_xrcd_table(dev); 2409 2410 err_pd_table_free: 2411 mlx4_cleanup_pd_table(dev); 2412 2413 err_kar_unmap: 2414 iounmap(priv->kar); 2415 2416 err_uar_free: 2417 mlx4_uar_free(dev, &priv->driver_uar); 2418 2419 err_uar_table_free: 2420 mlx4_cleanup_uar_table(dev); 2421 return err; 2422 } 2423 2424 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2425 { 2426 struct mlx4_priv *priv = mlx4_priv(dev); 2427 struct msix_entry *entries; 2428 int i; 2429 2430 if (msi_x) { 2431 int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; 2432 2433 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2434 nreq); 2435 2436 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2437 if (!entries) 2438 goto no_msi; 2439 2440 for (i = 0; i < nreq; ++i) 2441 entries[i].entry = i; 2442 2443 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2444 nreq); 2445 2446 if (nreq < 0) { 2447 kfree(entries); 2448 goto no_msi; 2449 } else if (nreq < MSIX_LEGACY_SZ + 2450 dev->caps.num_ports * MIN_MSIX_P_PORT) { 2451 /*Working in legacy mode , all EQ's shared*/ 2452 dev->caps.comp_pool = 0; 2453 dev->caps.num_comp_vectors = nreq - 1; 2454 } else { 2455 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 2456 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 2457 } 2458 for (i = 0; i < nreq; ++i) 2459 priv->eq_table.eq[i].irq = entries[i].vector; 2460 2461 dev->flags |= MLX4_FLAG_MSI_X; 2462 2463 kfree(entries); 2464 return; 2465 } 2466 2467 no_msi: 2468 dev->caps.num_comp_vectors = 1; 2469 dev->caps.comp_pool = 0; 2470 2471 for (i = 0; i < 2; ++i) 2472 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2473 } 2474 2475 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2476 { 2477 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2478 int err = 0; 2479 2480 info->dev = dev; 2481 info->port = port; 2482 if (!mlx4_is_slave(dev)) { 2483 mlx4_init_mac_table(dev, &info->mac_table); 2484 mlx4_init_vlan_table(dev, &info->vlan_table); 2485 mlx4_init_roce_gid_table(dev, &info->gid_table); 2486 info->base_qpn = mlx4_get_base_qpn(dev, port); 2487 } 2488 2489 sprintf(info->dev_name, "mlx4_port%d", port); 2490 info->port_attr.attr.name = info->dev_name; 2491 if (mlx4_is_mfunc(dev)) 2492 info->port_attr.attr.mode = S_IRUGO; 2493 else { 2494 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2495 info->port_attr.store = set_port_type; 2496 } 2497 info->port_attr.show = show_port_type; 2498 sysfs_attr_init(&info->port_attr.attr); 2499 2500 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2501 if (err) { 2502 mlx4_err(dev, "Failed to create file for port %d\n", port); 2503 info->port = -1; 2504 } 2505 2506 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2507 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2508 if (mlx4_is_mfunc(dev)) 2509 info->port_mtu_attr.attr.mode = S_IRUGO; 2510 else { 2511 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2512 info->port_mtu_attr.store = set_port_ib_mtu; 2513 } 2514 info->port_mtu_attr.show = show_port_ib_mtu; 2515 sysfs_attr_init(&info->port_mtu_attr.attr); 2516 2517 err = device_create_file(&dev->persist->pdev->dev, 2518 &info->port_mtu_attr); 2519 if (err) { 2520 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2521 device_remove_file(&info->dev->persist->pdev->dev, 2522 &info->port_attr); 2523 info->port = -1; 2524 } 2525 2526 return err; 2527 } 2528 2529 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2530 { 2531 if (info->port < 0) 2532 return; 2533 2534 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2535 device_remove_file(&info->dev->persist->pdev->dev, 2536 &info->port_mtu_attr); 2537 } 2538 2539 static int mlx4_init_steering(struct mlx4_dev *dev) 2540 { 2541 struct mlx4_priv *priv = mlx4_priv(dev); 2542 int num_entries = dev->caps.num_ports; 2543 int i, j; 2544 2545 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2546 if (!priv->steer) 2547 return -ENOMEM; 2548 2549 for (i = 0; i < num_entries; i++) 2550 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2551 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2552 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2553 } 2554 return 0; 2555 } 2556 2557 static void mlx4_clear_steering(struct mlx4_dev *dev) 2558 { 2559 struct mlx4_priv *priv = mlx4_priv(dev); 2560 struct mlx4_steer_index *entry, *tmp_entry; 2561 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2562 int num_entries = dev->caps.num_ports; 2563 int i, j; 2564 2565 for (i = 0; i < num_entries; i++) { 2566 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2567 list_for_each_entry_safe(pqp, tmp_pqp, 2568 &priv->steer[i].promisc_qps[j], 2569 list) { 2570 list_del(&pqp->list); 2571 kfree(pqp); 2572 } 2573 list_for_each_entry_safe(entry, tmp_entry, 2574 &priv->steer[i].steer_entries[j], 2575 list) { 2576 list_del(&entry->list); 2577 list_for_each_entry_safe(pqp, tmp_pqp, 2578 &entry->duplicates, 2579 list) { 2580 list_del(&pqp->list); 2581 kfree(pqp); 2582 } 2583 kfree(entry); 2584 } 2585 } 2586 } 2587 kfree(priv->steer); 2588 } 2589 2590 static int extended_func_num(struct pci_dev *pdev) 2591 { 2592 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 2593 } 2594 2595 #define MLX4_OWNER_BASE 0x8069c 2596 #define MLX4_OWNER_SIZE 4 2597 2598 static int mlx4_get_ownership(struct mlx4_dev *dev) 2599 { 2600 void __iomem *owner; 2601 u32 ret; 2602 2603 if (pci_channel_offline(dev->persist->pdev)) 2604 return -EIO; 2605 2606 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2607 MLX4_OWNER_BASE, 2608 MLX4_OWNER_SIZE); 2609 if (!owner) { 2610 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2611 return -ENOMEM; 2612 } 2613 2614 ret = readl(owner); 2615 iounmap(owner); 2616 return (int) !!ret; 2617 } 2618 2619 static void mlx4_free_ownership(struct mlx4_dev *dev) 2620 { 2621 void __iomem *owner; 2622 2623 if (pci_channel_offline(dev->persist->pdev)) 2624 return; 2625 2626 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2627 MLX4_OWNER_BASE, 2628 MLX4_OWNER_SIZE); 2629 if (!owner) { 2630 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2631 return; 2632 } 2633 writel(0, owner); 2634 msleep(1000); 2635 iounmap(owner); 2636 } 2637 2638 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 2639 !!((flags) & MLX4_FLAG_MASTER)) 2640 2641 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2642 u8 total_vfs, int existing_vfs, int reset_flow) 2643 { 2644 u64 dev_flags = dev->flags; 2645 int err = 0; 2646 2647 if (reset_flow) { 2648 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 2649 GFP_KERNEL); 2650 if (!dev->dev_vfs) 2651 goto free_mem; 2652 return dev_flags; 2653 } 2654 2655 atomic_inc(&pf_loading); 2656 if (dev->flags & MLX4_FLAG_SRIOV) { 2657 if (existing_vfs != total_vfs) { 2658 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 2659 existing_vfs, total_vfs); 2660 total_vfs = existing_vfs; 2661 } 2662 } 2663 2664 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 2665 if (NULL == dev->dev_vfs) { 2666 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2667 goto disable_sriov; 2668 } 2669 2670 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 2671 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 2672 err = pci_enable_sriov(pdev, total_vfs); 2673 } 2674 if (err) { 2675 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 2676 err); 2677 goto disable_sriov; 2678 } else { 2679 mlx4_warn(dev, "Running in master mode\n"); 2680 dev_flags |= MLX4_FLAG_SRIOV | 2681 MLX4_FLAG_MASTER; 2682 dev_flags &= ~MLX4_FLAG_SLAVE; 2683 dev->persist->num_vfs = total_vfs; 2684 } 2685 return dev_flags; 2686 2687 disable_sriov: 2688 atomic_dec(&pf_loading); 2689 free_mem: 2690 dev->persist->num_vfs = 0; 2691 kfree(dev->dev_vfs); 2692 return dev_flags & ~MLX4_FLAG_MASTER; 2693 } 2694 2695 enum { 2696 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 2697 }; 2698 2699 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 2700 int *nvfs) 2701 { 2702 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 2703 /* Checking for 64 VFs as a limitation of CX2 */ 2704 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 2705 requested_vfs >= 64) { 2706 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 2707 requested_vfs); 2708 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 2709 } 2710 return 0; 2711 } 2712 2713 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2714 int total_vfs, int *nvfs, struct mlx4_priv *priv, 2715 int reset_flow) 2716 { 2717 struct mlx4_dev *dev; 2718 unsigned sum = 0; 2719 int err; 2720 int port; 2721 int i; 2722 struct mlx4_dev_cap *dev_cap = NULL; 2723 int existing_vfs = 0; 2724 2725 dev = &priv->dev; 2726 2727 INIT_LIST_HEAD(&priv->ctx_list); 2728 spin_lock_init(&priv->ctx_lock); 2729 2730 mutex_init(&priv->port_mutex); 2731 mutex_init(&priv->bond_mutex); 2732 2733 INIT_LIST_HEAD(&priv->pgdir_list); 2734 mutex_init(&priv->pgdir_mutex); 2735 2736 INIT_LIST_HEAD(&priv->bf_list); 2737 mutex_init(&priv->bf_mutex); 2738 2739 dev->rev_id = pdev->revision; 2740 dev->numa_node = dev_to_node(&pdev->dev); 2741 2742 /* Detect if this device is a virtual function */ 2743 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2744 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2745 dev->flags |= MLX4_FLAG_SLAVE; 2746 } else { 2747 /* We reset the device and enable SRIOV only for physical 2748 * devices. Try to claim ownership on the device; 2749 * if already taken, skip -- do not allow multiple PFs */ 2750 err = mlx4_get_ownership(dev); 2751 if (err) { 2752 if (err < 0) 2753 return err; 2754 else { 2755 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 2756 return -EINVAL; 2757 } 2758 } 2759 2760 atomic_set(&priv->opreq_count, 0); 2761 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 2762 2763 /* 2764 * Now reset the HCA before we touch the PCI capabilities or 2765 * attempt a firmware command, since a boot ROM may have left 2766 * the HCA in an undefined state. 2767 */ 2768 err = mlx4_reset(dev); 2769 if (err) { 2770 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 2771 goto err_sriov; 2772 } 2773 2774 if (total_vfs) { 2775 dev->flags = MLX4_FLAG_MASTER; 2776 existing_vfs = pci_num_vf(pdev); 2777 if (existing_vfs) 2778 dev->flags |= MLX4_FLAG_SRIOV; 2779 dev->persist->num_vfs = total_vfs; 2780 } 2781 } 2782 2783 /* on load remove any previous indication of internal error, 2784 * device is up. 2785 */ 2786 dev->persist->state = MLX4_DEVICE_STATE_UP; 2787 2788 slave_start: 2789 err = mlx4_cmd_init(dev); 2790 if (err) { 2791 mlx4_err(dev, "Failed to init command interface, aborting\n"); 2792 goto err_sriov; 2793 } 2794 2795 /* In slave functions, the communication channel must be initialized 2796 * before posting commands. Also, init num_slaves before calling 2797 * mlx4_init_hca */ 2798 if (mlx4_is_mfunc(dev)) { 2799 if (mlx4_is_master(dev)) { 2800 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 2801 2802 } else { 2803 dev->num_slaves = 0; 2804 err = mlx4_multi_func_init(dev); 2805 if (err) { 2806 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 2807 goto err_cmd; 2808 } 2809 } 2810 } 2811 2812 err = mlx4_init_fw(dev); 2813 if (err) { 2814 mlx4_err(dev, "Failed to init fw, aborting.\n"); 2815 goto err_mfunc; 2816 } 2817 2818 if (mlx4_is_master(dev)) { 2819 /* when we hit the goto slave_start below, dev_cap already initialized */ 2820 if (!dev_cap) { 2821 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 2822 2823 if (!dev_cap) { 2824 err = -ENOMEM; 2825 goto err_fw; 2826 } 2827 2828 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2829 if (err) { 2830 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2831 goto err_fw; 2832 } 2833 2834 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2835 goto err_fw; 2836 2837 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2838 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 2839 total_vfs, 2840 existing_vfs, 2841 reset_flow); 2842 2843 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2844 dev->flags = dev_flags; 2845 if (!SRIOV_VALID_STATE(dev->flags)) { 2846 mlx4_err(dev, "Invalid SRIOV state\n"); 2847 goto err_sriov; 2848 } 2849 err = mlx4_reset(dev); 2850 if (err) { 2851 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2852 goto err_sriov; 2853 } 2854 goto slave_start; 2855 } 2856 } else { 2857 /* Legacy mode FW requires SRIOV to be enabled before 2858 * doing QUERY_DEV_CAP, since max_eq's value is different if 2859 * SRIOV is enabled. 2860 */ 2861 memset(dev_cap, 0, sizeof(*dev_cap)); 2862 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2863 if (err) { 2864 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2865 goto err_fw; 2866 } 2867 2868 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2869 goto err_fw; 2870 } 2871 } 2872 2873 err = mlx4_init_hca(dev); 2874 if (err) { 2875 if (err == -EACCES) { 2876 /* Not primary Physical function 2877 * Running in slave mode */ 2878 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2879 /* We're not a PF */ 2880 if (dev->flags & MLX4_FLAG_SRIOV) { 2881 if (!existing_vfs) 2882 pci_disable_sriov(pdev); 2883 if (mlx4_is_master(dev) && !reset_flow) 2884 atomic_dec(&pf_loading); 2885 dev->flags &= ~MLX4_FLAG_SRIOV; 2886 } 2887 if (!mlx4_is_slave(dev)) 2888 mlx4_free_ownership(dev); 2889 dev->flags |= MLX4_FLAG_SLAVE; 2890 dev->flags &= ~MLX4_FLAG_MASTER; 2891 goto slave_start; 2892 } else 2893 goto err_fw; 2894 } 2895 2896 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2897 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 2898 existing_vfs, reset_flow); 2899 2900 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 2901 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 2902 dev->flags = dev_flags; 2903 err = mlx4_cmd_init(dev); 2904 if (err) { 2905 /* Only VHCR is cleaned up, so could still 2906 * send FW commands 2907 */ 2908 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 2909 goto err_close; 2910 } 2911 } else { 2912 dev->flags = dev_flags; 2913 } 2914 2915 if (!SRIOV_VALID_STATE(dev->flags)) { 2916 mlx4_err(dev, "Invalid SRIOV state\n"); 2917 goto err_close; 2918 } 2919 } 2920 2921 /* check if the device is functioning at its maximum possible speed. 2922 * No return code for this call, just warn the user in case of PCI 2923 * express device capabilities are under-satisfied by the bus. 2924 */ 2925 if (!mlx4_is_slave(dev)) 2926 mlx4_check_pcie_caps(dev); 2927 2928 /* In master functions, the communication channel must be initialized 2929 * after obtaining its address from fw */ 2930 if (mlx4_is_master(dev)) { 2931 int ib_ports = 0; 2932 2933 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2934 ib_ports++; 2935 2936 if (ib_ports && 2937 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2938 mlx4_err(dev, 2939 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); 2940 err = -EINVAL; 2941 goto err_close; 2942 } 2943 if (dev->caps.num_ports < 2 && 2944 num_vfs_argc > 1) { 2945 err = -EINVAL; 2946 mlx4_err(dev, 2947 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 2948 dev->caps.num_ports); 2949 goto err_close; 2950 } 2951 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 2952 2953 for (i = 0; 2954 i < sizeof(dev->persist->nvfs)/ 2955 sizeof(dev->persist->nvfs[0]); i++) { 2956 unsigned j; 2957 2958 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 2959 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 2960 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 2961 dev->caps.num_ports; 2962 } 2963 } 2964 2965 /* In master functions, the communication channel 2966 * must be initialized after obtaining its address from fw 2967 */ 2968 err = mlx4_multi_func_init(dev); 2969 if (err) { 2970 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 2971 goto err_close; 2972 } 2973 } 2974 2975 err = mlx4_alloc_eq_table(dev); 2976 if (err) 2977 goto err_master_mfunc; 2978 2979 priv->msix_ctl.pool_bm = 0; 2980 mutex_init(&priv->msix_ctl.pool_lock); 2981 2982 mlx4_enable_msi_x(dev); 2983 if ((mlx4_is_mfunc(dev)) && 2984 !(dev->flags & MLX4_FLAG_MSI_X)) { 2985 err = -ENOSYS; 2986 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 2987 goto err_free_eq; 2988 } 2989 2990 if (!mlx4_is_slave(dev)) { 2991 err = mlx4_init_steering(dev); 2992 if (err) 2993 goto err_disable_msix; 2994 } 2995 2996 err = mlx4_setup_hca(dev); 2997 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 2998 !mlx4_is_mfunc(dev)) { 2999 dev->flags &= ~MLX4_FLAG_MSI_X; 3000 dev->caps.num_comp_vectors = 1; 3001 dev->caps.comp_pool = 0; 3002 pci_disable_msix(pdev); 3003 err = mlx4_setup_hca(dev); 3004 } 3005 3006 if (err) 3007 goto err_steer; 3008 3009 mlx4_init_quotas(dev); 3010 /* When PF resources are ready arm its comm channel to enable 3011 * getting commands 3012 */ 3013 if (mlx4_is_master(dev)) { 3014 err = mlx4_ARM_COMM_CHANNEL(dev); 3015 if (err) { 3016 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3017 err); 3018 goto err_steer; 3019 } 3020 } 3021 3022 for (port = 1; port <= dev->caps.num_ports; port++) { 3023 err = mlx4_init_port_info(dev, port); 3024 if (err) 3025 goto err_port; 3026 } 3027 3028 priv->v2p.port1 = 1; 3029 priv->v2p.port2 = 2; 3030 3031 err = mlx4_register_device(dev); 3032 if (err) 3033 goto err_port; 3034 3035 mlx4_request_modules(dev); 3036 3037 mlx4_sense_init(dev); 3038 mlx4_start_sense(dev); 3039 3040 priv->removed = 0; 3041 3042 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3043 atomic_dec(&pf_loading); 3044 3045 kfree(dev_cap); 3046 return 0; 3047 3048 err_port: 3049 for (--port; port >= 1; --port) 3050 mlx4_cleanup_port_info(&priv->port[port]); 3051 3052 mlx4_cleanup_counters_table(dev); 3053 mlx4_cleanup_qp_table(dev); 3054 mlx4_cleanup_srq_table(dev); 3055 mlx4_cleanup_cq_table(dev); 3056 mlx4_cmd_use_polling(dev); 3057 mlx4_cleanup_eq_table(dev); 3058 mlx4_cleanup_mcg_table(dev); 3059 mlx4_cleanup_mr_table(dev); 3060 mlx4_cleanup_xrcd_table(dev); 3061 mlx4_cleanup_pd_table(dev); 3062 mlx4_cleanup_uar_table(dev); 3063 3064 err_steer: 3065 if (!mlx4_is_slave(dev)) 3066 mlx4_clear_steering(dev); 3067 3068 err_disable_msix: 3069 if (dev->flags & MLX4_FLAG_MSI_X) 3070 pci_disable_msix(pdev); 3071 3072 err_free_eq: 3073 mlx4_free_eq_table(dev); 3074 3075 err_master_mfunc: 3076 if (mlx4_is_master(dev)) { 3077 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3078 mlx4_multi_func_cleanup(dev); 3079 } 3080 3081 if (mlx4_is_slave(dev)) { 3082 kfree(dev->caps.qp0_qkey); 3083 kfree(dev->caps.qp0_tunnel); 3084 kfree(dev->caps.qp0_proxy); 3085 kfree(dev->caps.qp1_tunnel); 3086 kfree(dev->caps.qp1_proxy); 3087 } 3088 3089 err_close: 3090 mlx4_close_hca(dev); 3091 3092 err_fw: 3093 mlx4_close_fw(dev); 3094 3095 err_mfunc: 3096 if (mlx4_is_slave(dev)) 3097 mlx4_multi_func_cleanup(dev); 3098 3099 err_cmd: 3100 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3101 3102 err_sriov: 3103 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3104 pci_disable_sriov(pdev); 3105 dev->flags &= ~MLX4_FLAG_SRIOV; 3106 } 3107 3108 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3109 atomic_dec(&pf_loading); 3110 3111 kfree(priv->dev.dev_vfs); 3112 3113 if (!mlx4_is_slave(dev)) 3114 mlx4_free_ownership(dev); 3115 3116 kfree(dev_cap); 3117 return err; 3118 } 3119 3120 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3121 struct mlx4_priv *priv) 3122 { 3123 int err; 3124 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3125 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3126 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3127 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3128 unsigned total_vfs = 0; 3129 unsigned int i; 3130 3131 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3132 3133 err = pci_enable_device(pdev); 3134 if (err) { 3135 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3136 return err; 3137 } 3138 3139 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 3140 * per port, we must limit the number of VFs to 63 (since their are 3141 * 128 MACs) 3142 */ 3143 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3144 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3145 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3146 if (nvfs[i] < 0) { 3147 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3148 err = -EINVAL; 3149 goto err_disable_pdev; 3150 } 3151 } 3152 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3153 i++) { 3154 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3155 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3156 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3157 err = -EINVAL; 3158 goto err_disable_pdev; 3159 } 3160 } 3161 if (total_vfs >= MLX4_MAX_NUM_VF) { 3162 dev_err(&pdev->dev, 3163 "Requested more VF's (%d) than allowed (%d)\n", 3164 total_vfs, MLX4_MAX_NUM_VF - 1); 3165 err = -EINVAL; 3166 goto err_disable_pdev; 3167 } 3168 3169 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3170 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { 3171 dev_err(&pdev->dev, 3172 "Requested more VF's (%d) for port (%d) than allowed (%d)\n", 3173 nvfs[i] + nvfs[2], i + 1, 3174 MLX4_MAX_NUM_VF_P_PORT - 1); 3175 err = -EINVAL; 3176 goto err_disable_pdev; 3177 } 3178 } 3179 3180 /* Check for BARs. */ 3181 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3182 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3183 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3184 pci_dev_data, pci_resource_flags(pdev, 0)); 3185 err = -ENODEV; 3186 goto err_disable_pdev; 3187 } 3188 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3189 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3190 err = -ENODEV; 3191 goto err_disable_pdev; 3192 } 3193 3194 err = pci_request_regions(pdev, DRV_NAME); 3195 if (err) { 3196 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3197 goto err_disable_pdev; 3198 } 3199 3200 pci_set_master(pdev); 3201 3202 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3203 if (err) { 3204 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3205 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3206 if (err) { 3207 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3208 goto err_release_regions; 3209 } 3210 } 3211 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3212 if (err) { 3213 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3214 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3215 if (err) { 3216 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3217 goto err_release_regions; 3218 } 3219 } 3220 3221 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3222 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3223 /* Detect if this device is a virtual function */ 3224 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3225 /* When acting as pf, we normally skip vfs unless explicitly 3226 * requested to probe them. 3227 */ 3228 if (total_vfs) { 3229 unsigned vfs_offset = 0; 3230 3231 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3232 vfs_offset + nvfs[i] < extended_func_num(pdev); 3233 vfs_offset += nvfs[i], i++) 3234 ; 3235 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3236 err = -ENODEV; 3237 goto err_release_regions; 3238 } 3239 if ((extended_func_num(pdev) - vfs_offset) 3240 > prb_vf[i]) { 3241 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3242 extended_func_num(pdev)); 3243 err = -ENODEV; 3244 goto err_release_regions; 3245 } 3246 } 3247 } 3248 3249 err = mlx4_catas_init(&priv->dev); 3250 if (err) 3251 goto err_release_regions; 3252 3253 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3254 if (err) 3255 goto err_catas; 3256 3257 return 0; 3258 3259 err_catas: 3260 mlx4_catas_end(&priv->dev); 3261 3262 err_release_regions: 3263 pci_release_regions(pdev); 3264 3265 err_disable_pdev: 3266 pci_disable_device(pdev); 3267 pci_set_drvdata(pdev, NULL); 3268 return err; 3269 } 3270 3271 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3272 { 3273 struct mlx4_priv *priv; 3274 struct mlx4_dev *dev; 3275 int ret; 3276 3277 printk_once(KERN_INFO "%s", mlx4_version); 3278 3279 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3280 if (!priv) 3281 return -ENOMEM; 3282 3283 dev = &priv->dev; 3284 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3285 if (!dev->persist) { 3286 kfree(priv); 3287 return -ENOMEM; 3288 } 3289 dev->persist->pdev = pdev; 3290 dev->persist->dev = dev; 3291 pci_set_drvdata(pdev, dev->persist); 3292 priv->pci_dev_data = id->driver_data; 3293 mutex_init(&dev->persist->device_state_mutex); 3294 mutex_init(&dev->persist->interface_state_mutex); 3295 3296 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3297 if (ret) { 3298 kfree(dev->persist); 3299 kfree(priv); 3300 } else { 3301 pci_save_state(pdev); 3302 } 3303 3304 return ret; 3305 } 3306 3307 static void mlx4_clean_dev(struct mlx4_dev *dev) 3308 { 3309 struct mlx4_dev_persistent *persist = dev->persist; 3310 struct mlx4_priv *priv = mlx4_priv(dev); 3311 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3312 3313 memset(priv, 0, sizeof(*priv)); 3314 priv->dev.persist = persist; 3315 priv->dev.flags = flags; 3316 } 3317 3318 static void mlx4_unload_one(struct pci_dev *pdev) 3319 { 3320 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3321 struct mlx4_dev *dev = persist->dev; 3322 struct mlx4_priv *priv = mlx4_priv(dev); 3323 int pci_dev_data; 3324 int p, i; 3325 3326 if (priv->removed) 3327 return; 3328 3329 /* saving current ports type for further use */ 3330 for (i = 0; i < dev->caps.num_ports; i++) { 3331 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3332 dev->persist->curr_port_poss_type[i] = dev->caps. 3333 possible_type[i + 1]; 3334 } 3335 3336 pci_dev_data = priv->pci_dev_data; 3337 3338 mlx4_stop_sense(dev); 3339 mlx4_unregister_device(dev); 3340 3341 for (p = 1; p <= dev->caps.num_ports; p++) { 3342 mlx4_cleanup_port_info(&priv->port[p]); 3343 mlx4_CLOSE_PORT(dev, p); 3344 } 3345 3346 if (mlx4_is_master(dev)) 3347 mlx4_free_resource_tracker(dev, 3348 RES_TR_FREE_SLAVES_ONLY); 3349 3350 mlx4_cleanup_counters_table(dev); 3351 mlx4_cleanup_qp_table(dev); 3352 mlx4_cleanup_srq_table(dev); 3353 mlx4_cleanup_cq_table(dev); 3354 mlx4_cmd_use_polling(dev); 3355 mlx4_cleanup_eq_table(dev); 3356 mlx4_cleanup_mcg_table(dev); 3357 mlx4_cleanup_mr_table(dev); 3358 mlx4_cleanup_xrcd_table(dev); 3359 mlx4_cleanup_pd_table(dev); 3360 3361 if (mlx4_is_master(dev)) 3362 mlx4_free_resource_tracker(dev, 3363 RES_TR_FREE_STRUCTS_ONLY); 3364 3365 iounmap(priv->kar); 3366 mlx4_uar_free(dev, &priv->driver_uar); 3367 mlx4_cleanup_uar_table(dev); 3368 if (!mlx4_is_slave(dev)) 3369 mlx4_clear_steering(dev); 3370 mlx4_free_eq_table(dev); 3371 if (mlx4_is_master(dev)) 3372 mlx4_multi_func_cleanup(dev); 3373 mlx4_close_hca(dev); 3374 mlx4_close_fw(dev); 3375 if (mlx4_is_slave(dev)) 3376 mlx4_multi_func_cleanup(dev); 3377 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3378 3379 if (dev->flags & MLX4_FLAG_MSI_X) 3380 pci_disable_msix(pdev); 3381 3382 if (!mlx4_is_slave(dev)) 3383 mlx4_free_ownership(dev); 3384 3385 kfree(dev->caps.qp0_qkey); 3386 kfree(dev->caps.qp0_tunnel); 3387 kfree(dev->caps.qp0_proxy); 3388 kfree(dev->caps.qp1_tunnel); 3389 kfree(dev->caps.qp1_proxy); 3390 kfree(dev->dev_vfs); 3391 3392 mlx4_clean_dev(dev); 3393 priv->pci_dev_data = pci_dev_data; 3394 priv->removed = 1; 3395 } 3396 3397 static void mlx4_remove_one(struct pci_dev *pdev) 3398 { 3399 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3400 struct mlx4_dev *dev = persist->dev; 3401 struct mlx4_priv *priv = mlx4_priv(dev); 3402 int active_vfs = 0; 3403 3404 mutex_lock(&persist->interface_state_mutex); 3405 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3406 mutex_unlock(&persist->interface_state_mutex); 3407 3408 /* Disabling SR-IOV is not allowed while there are active vf's */ 3409 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3410 active_vfs = mlx4_how_many_lives_vf(dev); 3411 if (active_vfs) { 3412 pr_warn("Removing PF when there are active VF's !!\n"); 3413 pr_warn("Will not disable SR-IOV.\n"); 3414 } 3415 } 3416 3417 /* device marked to be under deletion running now without the lock 3418 * letting other tasks to be terminated 3419 */ 3420 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3421 mlx4_unload_one(pdev); 3422 else 3423 mlx4_info(dev, "%s: interface is down\n", __func__); 3424 mlx4_catas_end(dev); 3425 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 3426 mlx4_warn(dev, "Disabling SR-IOV\n"); 3427 pci_disable_sriov(pdev); 3428 } 3429 3430 pci_release_regions(pdev); 3431 pci_disable_device(pdev); 3432 kfree(dev->persist); 3433 kfree(priv); 3434 pci_set_drvdata(pdev, NULL); 3435 } 3436 3437 static int restore_current_port_types(struct mlx4_dev *dev, 3438 enum mlx4_port_type *types, 3439 enum mlx4_port_type *poss_types) 3440 { 3441 struct mlx4_priv *priv = mlx4_priv(dev); 3442 int err, i; 3443 3444 mlx4_stop_sense(dev); 3445 3446 mutex_lock(&priv->port_mutex); 3447 for (i = 0; i < dev->caps.num_ports; i++) 3448 dev->caps.possible_type[i + 1] = poss_types[i]; 3449 err = mlx4_change_port_types(dev, types); 3450 mlx4_start_sense(dev); 3451 mutex_unlock(&priv->port_mutex); 3452 3453 return err; 3454 } 3455 3456 int mlx4_restart_one(struct pci_dev *pdev) 3457 { 3458 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3459 struct mlx4_dev *dev = persist->dev; 3460 struct mlx4_priv *priv = mlx4_priv(dev); 3461 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3462 int pci_dev_data, err, total_vfs; 3463 3464 pci_dev_data = priv->pci_dev_data; 3465 total_vfs = dev->persist->num_vfs; 3466 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3467 3468 mlx4_unload_one(pdev); 3469 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); 3470 if (err) { 3471 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3472 __func__, pci_name(pdev), err); 3473 return err; 3474 } 3475 3476 err = restore_current_port_types(dev, dev->persist->curr_port_type, 3477 dev->persist->curr_port_poss_type); 3478 if (err) 3479 mlx4_err(dev, "could not restore original port types (%d)\n", 3480 err); 3481 3482 return err; 3483 } 3484 3485 static const struct pci_device_id mlx4_pci_table[] = { 3486 /* MT25408 "Hermon" SDR */ 3487 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3488 /* MT25408 "Hermon" DDR */ 3489 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3490 /* MT25408 "Hermon" QDR */ 3491 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3492 /* MT25408 "Hermon" DDR PCIe gen2 */ 3493 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3494 /* MT25408 "Hermon" QDR PCIe gen2 */ 3495 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3496 /* MT25408 "Hermon" EN 10GigE */ 3497 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3498 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 3499 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3500 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 3501 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3502 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 3503 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3504 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 3505 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3506 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 3507 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3508 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 3509 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3510 /* MT25400 Family [ConnectX-2 Virtual Function] */ 3511 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, 3512 /* MT27500 Family [ConnectX-3] */ 3513 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 3514 /* MT27500 Family [ConnectX-3 Virtual Function] */ 3515 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, 3516 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 3517 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 3518 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 3519 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 3520 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 3521 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 3522 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 3523 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 3524 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 3525 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 3526 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 3527 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 3528 { 0, } 3529 }; 3530 3531 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 3532 3533 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3534 pci_channel_state_t state) 3535 { 3536 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3537 3538 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 3539 mlx4_enter_error_state(persist); 3540 3541 mutex_lock(&persist->interface_state_mutex); 3542 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3543 mlx4_unload_one(pdev); 3544 3545 mutex_unlock(&persist->interface_state_mutex); 3546 if (state == pci_channel_io_perm_failure) 3547 return PCI_ERS_RESULT_DISCONNECT; 3548 3549 pci_disable_device(pdev); 3550 return PCI_ERS_RESULT_NEED_RESET; 3551 } 3552 3553 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3554 { 3555 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3556 struct mlx4_dev *dev = persist->dev; 3557 struct mlx4_priv *priv = mlx4_priv(dev); 3558 int ret; 3559 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3560 int total_vfs; 3561 3562 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 3563 ret = pci_enable_device(pdev); 3564 if (ret) { 3565 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 3566 return PCI_ERS_RESULT_DISCONNECT; 3567 } 3568 3569 pci_set_master(pdev); 3570 pci_restore_state(pdev); 3571 pci_save_state(pdev); 3572 3573 total_vfs = dev->persist->num_vfs; 3574 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3575 3576 mutex_lock(&persist->interface_state_mutex); 3577 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 3578 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 3579 priv, 1); 3580 if (ret) { 3581 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 3582 __func__, ret); 3583 goto end; 3584 } 3585 3586 ret = restore_current_port_types(dev, dev->persist-> 3587 curr_port_type, dev->persist-> 3588 curr_port_poss_type); 3589 if (ret) 3590 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 3591 } 3592 end: 3593 mutex_unlock(&persist->interface_state_mutex); 3594 3595 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3596 } 3597 3598 static void mlx4_shutdown(struct pci_dev *pdev) 3599 { 3600 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3601 3602 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 3603 mutex_lock(&persist->interface_state_mutex); 3604 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3605 mlx4_unload_one(pdev); 3606 mutex_unlock(&persist->interface_state_mutex); 3607 } 3608 3609 static const struct pci_error_handlers mlx4_err_handler = { 3610 .error_detected = mlx4_pci_err_detected, 3611 .slot_reset = mlx4_pci_slot_reset, 3612 }; 3613 3614 static struct pci_driver mlx4_driver = { 3615 .name = DRV_NAME, 3616 .id_table = mlx4_pci_table, 3617 .probe = mlx4_init_one, 3618 .shutdown = mlx4_shutdown, 3619 .remove = mlx4_remove_one, 3620 .err_handler = &mlx4_err_handler, 3621 }; 3622 3623 static int __init mlx4_verify_params(void) 3624 { 3625 if ((log_num_mac < 0) || (log_num_mac > 7)) { 3626 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 3627 return -1; 3628 } 3629 3630 if (log_num_vlan != 0) 3631 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 3632 MLX4_LOG_NUM_VLANS); 3633 3634 if (use_prio != 0) 3635 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 3636 3637 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 3638 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 3639 log_mtts_per_seg); 3640 return -1; 3641 } 3642 3643 /* Check if module param for ports type has legal combination */ 3644 if (port_type_array[0] == false && port_type_array[1] == true) { 3645 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 3646 port_type_array[0] = true; 3647 } 3648 3649 if (mlx4_log_num_mgm_entry_size < -7 || 3650 (mlx4_log_num_mgm_entry_size > 0 && 3651 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 3652 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 3653 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 3654 mlx4_log_num_mgm_entry_size, 3655 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 3656 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 3657 return -1; 3658 } 3659 3660 return 0; 3661 } 3662 3663 static int __init mlx4_init(void) 3664 { 3665 int ret; 3666 3667 if (mlx4_verify_params()) 3668 return -EINVAL; 3669 3670 3671 mlx4_wq = create_singlethread_workqueue("mlx4"); 3672 if (!mlx4_wq) 3673 return -ENOMEM; 3674 3675 ret = pci_register_driver(&mlx4_driver); 3676 if (ret < 0) 3677 destroy_workqueue(mlx4_wq); 3678 return ret < 0 ? ret : 0; 3679 } 3680 3681 static void __exit mlx4_cleanup(void) 3682 { 3683 pci_unregister_driver(&mlx4_driver); 3684 destroy_workqueue(mlx4_wq); 3685 } 3686 3687 module_init(mlx4_init); 3688 module_exit(mlx4_cleanup); 3689