1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 #include <linux/kmod.h> 45 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/doorbell.h> 48 49 #include "mlx4.h" 50 #include "fw.h" 51 #include "icm.h" 52 53 MODULE_AUTHOR("Roland Dreier"); 54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 55 MODULE_LICENSE("Dual BSD/GPL"); 56 MODULE_VERSION(DRV_VERSION); 57 58 struct workqueue_struct *mlx4_wq; 59 60 #ifdef CONFIG_MLX4_DEBUG 61 62 int mlx4_debug_level = 0; 63 module_param_named(debug_level, mlx4_debug_level, int, 0644); 64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 65 66 #endif /* CONFIG_MLX4_DEBUG */ 67 68 #ifdef CONFIG_PCI_MSI 69 70 static int msi_x = 1; 71 module_param(msi_x, int, 0444); 72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 73 74 #else /* CONFIG_PCI_MSI */ 75 76 #define msi_x (0) 77 78 #endif /* CONFIG_PCI_MSI */ 79 80 static uint8_t num_vfs[3] = {0, 0, 0}; 81 static int num_vfs_argc; 82 module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 84 "num_vfs=port1,port2,port1+2"); 85 86 static uint8_t probe_vf[3] = {0, 0, 0}; 87 static int probe_vfs_argc; 88 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 89 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 90 "probe_vf=port1,port2,port1+2"); 91 92 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 93 module_param_named(log_num_mgm_entry_size, 94 mlx4_log_num_mgm_entry_size, int, 0444); 95 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 96 " of qp per mcg, for example:" 97 " 10 gives 248.range: 7 <=" 98 " log_num_mgm_entry_size <= 12." 99 " To activate device managed" 100 " flow steering when available, set to -1"); 101 102 static bool enable_64b_cqe_eqe = true; 103 module_param(enable_64b_cqe_eqe, bool, 0444); 104 MODULE_PARM_DESC(enable_64b_cqe_eqe, 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 106 107 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 109 MLX4_FUNC_CAP_DMFS_A0_STATIC) 110 111 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) 112 113 static char mlx4_version[] = 114 DRV_NAME ": Mellanox ConnectX core driver v" 115 DRV_VERSION " (" DRV_RELDATE ")\n"; 116 117 static struct mlx4_profile default_profile = { 118 .num_qp = 1 << 18, 119 .num_srq = 1 << 16, 120 .rdmarc_per_qp = 1 << 4, 121 .num_cq = 1 << 16, 122 .num_mcg = 1 << 13, 123 .num_mpt = 1 << 19, 124 .num_mtt = 1 << 20, /* It is really num mtt segements */ 125 }; 126 127 static struct mlx4_profile low_mem_profile = { 128 .num_qp = 1 << 17, 129 .num_srq = 1 << 6, 130 .rdmarc_per_qp = 1 << 4, 131 .num_cq = 1 << 8, 132 .num_mcg = 1 << 8, 133 .num_mpt = 1 << 9, 134 .num_mtt = 1 << 7, 135 }; 136 137 static int log_num_mac = 7; 138 module_param_named(log_num_mac, log_num_mac, int, 0444); 139 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 140 141 static int log_num_vlan; 142 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 143 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 144 /* Log2 max number of VLANs per ETH port (0-7) */ 145 #define MLX4_LOG_NUM_VLANS 7 146 #define MLX4_MIN_LOG_NUM_VLANS 0 147 #define MLX4_MIN_LOG_NUM_MAC 1 148 149 static bool use_prio; 150 module_param_named(use_prio, use_prio, bool, 0444); 151 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); 152 153 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 154 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 155 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 156 157 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 158 static int arr_argc = 2; 159 module_param_array(port_type_array, int, &arr_argc, 0444); 160 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 161 "1 for IB, 2 for Ethernet"); 162 163 struct mlx4_port_config { 164 struct list_head list; 165 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 166 struct pci_dev *pdev; 167 }; 168 169 static atomic_t pf_loading = ATOMIC_INIT(0); 170 171 int mlx4_check_port_params(struct mlx4_dev *dev, 172 enum mlx4_port_type *port_type) 173 { 174 int i; 175 176 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 177 for (i = 0; i < dev->caps.num_ports - 1; i++) { 178 if (port_type[i] != port_type[i + 1]) { 179 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 180 return -EINVAL; 181 } 182 } 183 } 184 185 for (i = 0; i < dev->caps.num_ports; i++) { 186 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 187 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 188 i + 1); 189 return -EINVAL; 190 } 191 } 192 return 0; 193 } 194 195 static void mlx4_set_port_mask(struct mlx4_dev *dev) 196 { 197 int i; 198 199 for (i = 1; i <= dev->caps.num_ports; ++i) 200 dev->caps.port_mask[i] = dev->caps.port_type[i]; 201 } 202 203 enum { 204 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 205 }; 206 207 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 208 { 209 int err = 0; 210 struct mlx4_func func; 211 212 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 213 err = mlx4_QUERY_FUNC(dev, &func, 0); 214 if (err) { 215 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 216 return err; 217 } 218 dev_cap->max_eqs = func.max_eq; 219 dev_cap->reserved_eqs = func.rsvd_eqs; 220 dev_cap->reserved_uars = func.rsvd_uars; 221 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 222 } 223 return err; 224 } 225 226 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 227 { 228 struct mlx4_caps *dev_cap = &dev->caps; 229 230 /* FW not supporting or cancelled by user */ 231 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || 232 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) 233 return; 234 235 /* Must have 64B CQE_EQE enabled by FW to use bigger stride 236 * When FW has NCSI it may decide not to report 64B CQE/EQEs 237 */ 238 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || 239 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { 240 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 241 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 242 return; 243 } 244 245 if (cache_line_size() == 128 || cache_line_size() == 256) { 246 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); 247 /* Changing the real data inside CQE size to 32B */ 248 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 249 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 250 251 if (mlx4_is_master(dev)) 252 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 253 } else { 254 mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); 255 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 256 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 257 } 258 } 259 260 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, 261 struct mlx4_port_cap *port_cap) 262 { 263 dev->caps.vl_cap[port] = port_cap->max_vl; 264 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; 265 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; 266 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; 267 /* set gid and pkey table operating lengths by default 268 * to non-sriov values 269 */ 270 dev->caps.gid_table_len[port] = port_cap->max_gids; 271 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 272 dev->caps.port_width_cap[port] = port_cap->max_port_width; 273 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 274 dev->caps.def_mac[port] = port_cap->def_mac; 275 dev->caps.supported_type[port] = port_cap->supported_port_types; 276 dev->caps.suggested_type[port] = port_cap->suggested_type; 277 dev->caps.default_sense[port] = port_cap->default_sense; 278 dev->caps.trans_type[port] = port_cap->trans_type; 279 dev->caps.vendor_oui[port] = port_cap->vendor_oui; 280 dev->caps.wavelength[port] = port_cap->wavelength; 281 dev->caps.trans_code[port] = port_cap->trans_code; 282 283 return 0; 284 } 285 286 static int mlx4_dev_port(struct mlx4_dev *dev, int port, 287 struct mlx4_port_cap *port_cap) 288 { 289 int err = 0; 290 291 err = mlx4_QUERY_PORT(dev, port, port_cap); 292 293 if (err) 294 mlx4_err(dev, "QUERY_PORT command failed.\n"); 295 296 return err; 297 } 298 299 #define MLX4_A0_STEERING_TABLE_SIZE 256 300 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 301 { 302 int err; 303 int i; 304 305 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 306 if (err) { 307 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 308 return err; 309 } 310 mlx4_dev_cap_dump(dev, dev_cap); 311 312 if (dev_cap->min_page_sz > PAGE_SIZE) { 313 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 314 dev_cap->min_page_sz, PAGE_SIZE); 315 return -ENODEV; 316 } 317 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 318 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 319 dev_cap->num_ports, MLX4_MAX_PORTS); 320 return -ENODEV; 321 } 322 323 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { 324 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 325 dev_cap->uar_size, 326 (unsigned long long) 327 pci_resource_len(dev->persist->pdev, 2)); 328 return -ENODEV; 329 } 330 331 dev->caps.num_ports = dev_cap->num_ports; 332 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 333 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 334 dev->caps.num_sys_eqs : 335 MLX4_MAX_EQ_NUM; 336 for (i = 1; i <= dev->caps.num_ports; ++i) { 337 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); 338 if (err) { 339 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); 340 return err; 341 } 342 } 343 344 dev->caps.uar_page_size = PAGE_SIZE; 345 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 346 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 347 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 348 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 349 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 350 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 351 dev->caps.max_wqes = dev_cap->max_qp_sz; 352 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 353 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 354 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 355 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 356 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 357 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 358 /* 359 * Subtract 1 from the limit because we need to allocate a 360 * spare CQE so the HCA HW can tell the difference between an 361 * empty CQ and a full CQ. 362 */ 363 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 364 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 365 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 366 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 367 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 368 369 /* The first 128 UARs are used for EQ doorbells */ 370 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 371 dev->caps.reserved_pds = dev_cap->reserved_pds; 372 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 373 dev_cap->reserved_xrcds : 0; 374 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 375 dev_cap->max_xrcds : 0; 376 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 377 378 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 379 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 380 dev->caps.flags = dev_cap->flags; 381 dev->caps.flags2 = dev_cap->flags2; 382 dev->caps.bmme_flags = dev_cap->bmme_flags; 383 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 384 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 385 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 386 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 387 388 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 389 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 390 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 391 /* Don't do sense port on multifunction devices (for now at least) */ 392 if (mlx4_is_mfunc(dev)) 393 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 394 395 if (mlx4_low_memory_profile()) { 396 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 397 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 398 } else { 399 dev->caps.log_num_macs = log_num_mac; 400 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 401 } 402 403 for (i = 1; i <= dev->caps.num_ports; ++i) { 404 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 405 if (dev->caps.supported_type[i]) { 406 /* if only ETH is supported - assign ETH */ 407 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 408 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 409 /* if only IB is supported, assign IB */ 410 else if (dev->caps.supported_type[i] == 411 MLX4_PORT_TYPE_IB) 412 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 413 else { 414 /* if IB and ETH are supported, we set the port 415 * type according to user selection of port type; 416 * if user selected none, take the FW hint */ 417 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 418 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 419 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 420 else 421 dev->caps.port_type[i] = port_type_array[i - 1]; 422 } 423 } 424 /* 425 * Link sensing is allowed on the port if 3 conditions are true: 426 * 1. Both protocols are supported on the port. 427 * 2. Different types are supported on the port 428 * 3. FW declared that it supports link sensing 429 */ 430 mlx4_priv(dev)->sense.sense_allowed[i] = 431 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 432 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 433 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 434 435 /* 436 * If "default_sense" bit is set, we move the port to "AUTO" mode 437 * and perform sense_port FW command to try and set the correct 438 * port type from beginning 439 */ 440 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 441 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 442 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 443 mlx4_SENSE_PORT(dev, i, &sensed_port); 444 if (sensed_port != MLX4_PORT_TYPE_NONE) 445 dev->caps.port_type[i] = sensed_port; 446 } else { 447 dev->caps.possible_type[i] = dev->caps.port_type[i]; 448 } 449 450 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 451 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 452 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 453 i, 1 << dev->caps.log_num_macs); 454 } 455 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 456 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 457 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 458 i, 1 << dev->caps.log_num_vlans); 459 } 460 } 461 462 dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); 463 464 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 465 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 466 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 467 (1 << dev->caps.log_num_macs) * 468 (1 << dev->caps.log_num_vlans) * 469 dev->caps.num_ports; 470 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 471 472 if (dev_cap->dmfs_high_rate_qpn_base > 0 && 473 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) 474 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; 475 else 476 dev->caps.dmfs_high_rate_qpn_base = 477 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 478 479 if (dev_cap->dmfs_high_rate_qpn_range > 0 && 480 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 481 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; 482 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; 483 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; 484 } else { 485 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; 486 dev->caps.dmfs_high_rate_qpn_base = 487 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 488 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; 489 } 490 491 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = 492 dev->caps.dmfs_high_rate_qpn_range; 493 494 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 495 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 496 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 497 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 498 499 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; 500 501 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { 502 if (dev_cap->flags & 503 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 504 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 505 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 506 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 507 } 508 509 if (dev_cap->flags2 & 510 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | 511 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { 512 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); 513 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 514 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 515 } 516 } 517 518 if ((dev->caps.flags & 519 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 520 mlx4_is_master(dev)) 521 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 522 523 if (!mlx4_is_slave(dev)) { 524 mlx4_enable_cqe_eqe_stride(dev); 525 dev->caps.alloc_res_qp_mask = 526 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 527 MLX4_RESERVE_A0_QP; 528 } else { 529 dev->caps.alloc_res_qp_mask = 0; 530 } 531 532 return 0; 533 } 534 535 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, 536 enum pci_bus_speed *speed, 537 enum pcie_link_width *width) 538 { 539 u32 lnkcap1, lnkcap2; 540 int err1, err2; 541 542 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 543 544 *speed = PCI_SPEED_UNKNOWN; 545 *width = PCIE_LNK_WIDTH_UNKNOWN; 546 547 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, 548 &lnkcap1); 549 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, 550 &lnkcap2); 551 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 552 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 553 *speed = PCIE_SPEED_8_0GT; 554 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 555 *speed = PCIE_SPEED_5_0GT; 556 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 557 *speed = PCIE_SPEED_2_5GT; 558 } 559 if (!err1) { 560 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 561 if (!lnkcap2) { /* pre-r3.0 */ 562 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 563 *speed = PCIE_SPEED_5_0GT; 564 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 565 *speed = PCIE_SPEED_2_5GT; 566 } 567 } 568 569 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { 570 return err1 ? err1 : 571 err2 ? err2 : -EINVAL; 572 } 573 return 0; 574 } 575 576 static void mlx4_check_pcie_caps(struct mlx4_dev *dev) 577 { 578 enum pcie_link_width width, width_cap; 579 enum pci_bus_speed speed, speed_cap; 580 int err; 581 582 #define PCIE_SPEED_STR(speed) \ 583 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 584 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 585 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ 586 "Unknown") 587 588 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 589 if (err) { 590 mlx4_warn(dev, 591 "Unable to determine PCIe device BW capabilities\n"); 592 return; 593 } 594 595 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 596 if (err || speed == PCI_SPEED_UNKNOWN || 597 width == PCIE_LNK_WIDTH_UNKNOWN) { 598 mlx4_warn(dev, 599 "Unable to determine PCI device chain minimum BW\n"); 600 return; 601 } 602 603 if (width != width_cap || speed != speed_cap) 604 mlx4_warn(dev, 605 "PCIe BW is different than device's capability\n"); 606 607 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 608 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 609 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 610 width, width_cap); 611 return; 612 } 613 614 /*The function checks if there are live vf, return the num of them*/ 615 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 616 { 617 struct mlx4_priv *priv = mlx4_priv(dev); 618 struct mlx4_slave_state *s_state; 619 int i; 620 int ret = 0; 621 622 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 623 s_state = &priv->mfunc.master.slave_state[i]; 624 if (s_state->active && s_state->last_cmd != 625 MLX4_COMM_CMD_RESET) { 626 mlx4_warn(dev, "%s: slave: %d is still active\n", 627 __func__, i); 628 ret++; 629 } 630 } 631 return ret; 632 } 633 634 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 635 { 636 u32 qk = MLX4_RESERVED_QKEY_BASE; 637 638 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 639 qpn < dev->phys_caps.base_proxy_sqpn) 640 return -EINVAL; 641 642 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 643 /* tunnel qp */ 644 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 645 else 646 qk += qpn - dev->phys_caps.base_proxy_sqpn; 647 *qkey = qk; 648 return 0; 649 } 650 EXPORT_SYMBOL(mlx4_get_parav_qkey); 651 652 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 653 { 654 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 655 656 if (!mlx4_is_master(dev)) 657 return; 658 659 priv->virt2phys_pkey[slave][port - 1][i] = val; 660 } 661 EXPORT_SYMBOL(mlx4_sync_pkey_table); 662 663 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 664 { 665 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 666 667 if (!mlx4_is_master(dev)) 668 return; 669 670 priv->slave_node_guids[slave] = guid; 671 } 672 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 673 674 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 675 { 676 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 677 678 if (!mlx4_is_master(dev)) 679 return 0; 680 681 return priv->slave_node_guids[slave]; 682 } 683 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 684 685 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 686 { 687 struct mlx4_priv *priv = mlx4_priv(dev); 688 struct mlx4_slave_state *s_slave; 689 690 if (!mlx4_is_master(dev)) 691 return 0; 692 693 s_slave = &priv->mfunc.master.slave_state[slave]; 694 return !!s_slave->active; 695 } 696 EXPORT_SYMBOL(mlx4_is_slave_active); 697 698 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 699 struct mlx4_dev_cap *dev_cap, 700 struct mlx4_init_hca_param *hca_param) 701 { 702 dev->caps.steering_mode = hca_param->steering_mode; 703 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 704 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 705 dev->caps.fs_log_max_ucast_qp_range_size = 706 dev_cap->fs_log_max_ucast_qp_range_size; 707 } else 708 dev->caps.num_qp_per_mgm = 709 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 710 711 mlx4_dbg(dev, "Steering mode is: %s\n", 712 mlx4_steering_mode_str(dev->caps.steering_mode)); 713 } 714 715 static int mlx4_slave_cap(struct mlx4_dev *dev) 716 { 717 int err; 718 u32 page_size; 719 struct mlx4_dev_cap dev_cap; 720 struct mlx4_func_cap func_cap; 721 struct mlx4_init_hca_param hca_param; 722 u8 i; 723 724 memset(&hca_param, 0, sizeof(hca_param)); 725 err = mlx4_QUERY_HCA(dev, &hca_param); 726 if (err) { 727 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); 728 return err; 729 } 730 731 /* fail if the hca has an unknown global capability 732 * at this time global_caps should be always zeroed 733 */ 734 if (hca_param.global_caps) { 735 mlx4_err(dev, "Unknown hca global capabilities\n"); 736 return -ENOSYS; 737 } 738 739 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 740 741 dev->caps.hca_core_clock = hca_param.hca_core_clock; 742 743 memset(&dev_cap, 0, sizeof(dev_cap)); 744 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 745 err = mlx4_dev_cap(dev, &dev_cap); 746 if (err) { 747 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 748 return err; 749 } 750 751 err = mlx4_QUERY_FW(dev); 752 if (err) 753 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); 754 755 page_size = ~dev->caps.page_size_cap + 1; 756 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 757 if (page_size > PAGE_SIZE) { 758 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 759 page_size, PAGE_SIZE); 760 return -ENODEV; 761 } 762 763 /* slave gets uar page size from QUERY_HCA fw command */ 764 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 765 766 /* TODO: relax this assumption */ 767 if (dev->caps.uar_page_size != PAGE_SIZE) { 768 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 769 dev->caps.uar_page_size, PAGE_SIZE); 770 return -ENODEV; 771 } 772 773 memset(&func_cap, 0, sizeof(func_cap)); 774 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 775 if (err) { 776 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", 777 err); 778 return err; 779 } 780 781 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 782 PF_CONTEXT_BEHAVIOUR_MASK) { 783 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", 784 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); 785 return -ENOSYS; 786 } 787 788 dev->caps.num_ports = func_cap.num_ports; 789 dev->quotas.qp = func_cap.qp_quota; 790 dev->quotas.srq = func_cap.srq_quota; 791 dev->quotas.cq = func_cap.cq_quota; 792 dev->quotas.mpt = func_cap.mpt_quota; 793 dev->quotas.mtt = func_cap.mtt_quota; 794 dev->caps.num_qps = 1 << hca_param.log_num_qps; 795 dev->caps.num_srqs = 1 << hca_param.log_num_srqs; 796 dev->caps.num_cqs = 1 << hca_param.log_num_cqs; 797 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 798 dev->caps.num_eqs = func_cap.max_eq; 799 dev->caps.reserved_eqs = func_cap.reserved_eq; 800 dev->caps.reserved_lkey = func_cap.reserved_lkey; 801 dev->caps.num_pds = MLX4_NUM_PDS; 802 dev->caps.num_mgms = 0; 803 dev->caps.num_amgms = 0; 804 805 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 806 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 807 dev->caps.num_ports, MLX4_MAX_PORTS); 808 return -ENODEV; 809 } 810 811 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); 812 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 813 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 814 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 815 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 816 817 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 818 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || 819 !dev->caps.qp0_qkey) { 820 err = -ENOMEM; 821 goto err_mem; 822 } 823 824 for (i = 1; i <= dev->caps.num_ports; ++i) { 825 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 826 if (err) { 827 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 828 i, err); 829 goto err_mem; 830 } 831 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 832 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 833 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 834 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 835 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 836 dev->caps.port_mask[i] = dev->caps.port_type[i]; 837 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 838 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 839 &dev->caps.gid_table_len[i], 840 &dev->caps.pkey_table_len[i])) 841 goto err_mem; 842 } 843 844 if (dev->caps.uar_page_size * (dev->caps.num_uars - 845 dev->caps.reserved_uars) > 846 pci_resource_len(dev->persist->pdev, 847 2)) { 848 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 849 dev->caps.uar_page_size * dev->caps.num_uars, 850 (unsigned long long) 851 pci_resource_len(dev->persist->pdev, 2)); 852 goto err_mem; 853 } 854 855 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 856 dev->caps.eqe_size = 64; 857 dev->caps.eqe_factor = 1; 858 } else { 859 dev->caps.eqe_size = 32; 860 dev->caps.eqe_factor = 0; 861 } 862 863 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 864 dev->caps.cqe_size = 64; 865 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 866 } else { 867 dev->caps.cqe_size = 32; 868 } 869 870 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 871 dev->caps.eqe_size = hca_param.eqe_size; 872 dev->caps.eqe_factor = 0; 873 } 874 875 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 876 dev->caps.cqe_size = hca_param.cqe_size; 877 /* User still need to know when CQE > 32B */ 878 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 879 } 880 881 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 882 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 883 884 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 885 886 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 887 dev->caps.bf_reg_size) 888 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 889 890 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 891 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 892 893 return 0; 894 895 err_mem: 896 kfree(dev->caps.qp0_qkey); 897 kfree(dev->caps.qp0_tunnel); 898 kfree(dev->caps.qp0_proxy); 899 kfree(dev->caps.qp1_tunnel); 900 kfree(dev->caps.qp1_proxy); 901 dev->caps.qp0_qkey = NULL; 902 dev->caps.qp0_tunnel = NULL; 903 dev->caps.qp0_proxy = NULL; 904 dev->caps.qp1_tunnel = NULL; 905 dev->caps.qp1_proxy = NULL; 906 907 return err; 908 } 909 910 static void mlx4_request_modules(struct mlx4_dev *dev) 911 { 912 int port; 913 int has_ib_port = false; 914 int has_eth_port = false; 915 #define EN_DRV_NAME "mlx4_en" 916 #define IB_DRV_NAME "mlx4_ib" 917 918 for (port = 1; port <= dev->caps.num_ports; port++) { 919 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 920 has_ib_port = true; 921 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 922 has_eth_port = true; 923 } 924 925 if (has_eth_port) 926 request_module_nowait(EN_DRV_NAME); 927 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 928 request_module_nowait(IB_DRV_NAME); 929 } 930 931 /* 932 * Change the port configuration of the device. 933 * Every user of this function must hold the port mutex. 934 */ 935 int mlx4_change_port_types(struct mlx4_dev *dev, 936 enum mlx4_port_type *port_types) 937 { 938 int err = 0; 939 int change = 0; 940 int port; 941 942 for (port = 0; port < dev->caps.num_ports; port++) { 943 /* Change the port type only if the new type is different 944 * from the current, and not set to Auto */ 945 if (port_types[port] != dev->caps.port_type[port + 1]) 946 change = 1; 947 } 948 if (change) { 949 mlx4_unregister_device(dev); 950 for (port = 1; port <= dev->caps.num_ports; port++) { 951 mlx4_CLOSE_PORT(dev, port); 952 dev->caps.port_type[port] = port_types[port - 1]; 953 err = mlx4_SET_PORT(dev, port, -1); 954 if (err) { 955 mlx4_err(dev, "Failed to set port %d, aborting\n", 956 port); 957 goto out; 958 } 959 } 960 mlx4_set_port_mask(dev); 961 err = mlx4_register_device(dev); 962 if (err) { 963 mlx4_err(dev, "Failed to register device\n"); 964 goto out; 965 } 966 mlx4_request_modules(dev); 967 } 968 969 out: 970 return err; 971 } 972 973 static ssize_t show_port_type(struct device *dev, 974 struct device_attribute *attr, 975 char *buf) 976 { 977 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 978 port_attr); 979 struct mlx4_dev *mdev = info->dev; 980 char type[8]; 981 982 sprintf(type, "%s", 983 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 984 "ib" : "eth"); 985 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 986 sprintf(buf, "auto (%s)\n", type); 987 else 988 sprintf(buf, "%s\n", type); 989 990 return strlen(buf); 991 } 992 993 static ssize_t set_port_type(struct device *dev, 994 struct device_attribute *attr, 995 const char *buf, size_t count) 996 { 997 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 998 port_attr); 999 struct mlx4_dev *mdev = info->dev; 1000 struct mlx4_priv *priv = mlx4_priv(mdev); 1001 enum mlx4_port_type types[MLX4_MAX_PORTS]; 1002 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 1003 static DEFINE_MUTEX(set_port_type_mutex); 1004 int i; 1005 int err = 0; 1006 1007 mutex_lock(&set_port_type_mutex); 1008 1009 if (!strcmp(buf, "ib\n")) 1010 info->tmp_type = MLX4_PORT_TYPE_IB; 1011 else if (!strcmp(buf, "eth\n")) 1012 info->tmp_type = MLX4_PORT_TYPE_ETH; 1013 else if (!strcmp(buf, "auto\n")) 1014 info->tmp_type = MLX4_PORT_TYPE_AUTO; 1015 else { 1016 mlx4_err(mdev, "%s is not supported port type\n", buf); 1017 err = -EINVAL; 1018 goto err_out; 1019 } 1020 1021 mlx4_stop_sense(mdev); 1022 mutex_lock(&priv->port_mutex); 1023 /* Possible type is always the one that was delivered */ 1024 mdev->caps.possible_type[info->port] = info->tmp_type; 1025 1026 for (i = 0; i < mdev->caps.num_ports; i++) { 1027 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 1028 mdev->caps.possible_type[i+1]; 1029 if (types[i] == MLX4_PORT_TYPE_AUTO) 1030 types[i] = mdev->caps.port_type[i+1]; 1031 } 1032 1033 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 1034 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 1035 for (i = 1; i <= mdev->caps.num_ports; i++) { 1036 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 1037 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 1038 err = -EINVAL; 1039 } 1040 } 1041 } 1042 if (err) { 1043 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); 1044 goto out; 1045 } 1046 1047 mlx4_do_sense_ports(mdev, new_types, types); 1048 1049 err = mlx4_check_port_params(mdev, new_types); 1050 if (err) 1051 goto out; 1052 1053 /* We are about to apply the changes after the configuration 1054 * was verified, no need to remember the temporary types 1055 * any more */ 1056 for (i = 0; i < mdev->caps.num_ports; i++) 1057 priv->port[i + 1].tmp_type = 0; 1058 1059 err = mlx4_change_port_types(mdev, new_types); 1060 1061 out: 1062 mlx4_start_sense(mdev); 1063 mutex_unlock(&priv->port_mutex); 1064 err_out: 1065 mutex_unlock(&set_port_type_mutex); 1066 1067 return err ? err : count; 1068 } 1069 1070 enum ibta_mtu { 1071 IB_MTU_256 = 1, 1072 IB_MTU_512 = 2, 1073 IB_MTU_1024 = 3, 1074 IB_MTU_2048 = 4, 1075 IB_MTU_4096 = 5 1076 }; 1077 1078 static inline int int_to_ibta_mtu(int mtu) 1079 { 1080 switch (mtu) { 1081 case 256: return IB_MTU_256; 1082 case 512: return IB_MTU_512; 1083 case 1024: return IB_MTU_1024; 1084 case 2048: return IB_MTU_2048; 1085 case 4096: return IB_MTU_4096; 1086 default: return -1; 1087 } 1088 } 1089 1090 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 1091 { 1092 switch (mtu) { 1093 case IB_MTU_256: return 256; 1094 case IB_MTU_512: return 512; 1095 case IB_MTU_1024: return 1024; 1096 case IB_MTU_2048: return 2048; 1097 case IB_MTU_4096: return 4096; 1098 default: return -1; 1099 } 1100 } 1101 1102 static ssize_t show_port_ib_mtu(struct device *dev, 1103 struct device_attribute *attr, 1104 char *buf) 1105 { 1106 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1107 port_mtu_attr); 1108 struct mlx4_dev *mdev = info->dev; 1109 1110 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 1111 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1112 1113 sprintf(buf, "%d\n", 1114 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 1115 return strlen(buf); 1116 } 1117 1118 static ssize_t set_port_ib_mtu(struct device *dev, 1119 struct device_attribute *attr, 1120 const char *buf, size_t count) 1121 { 1122 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1123 port_mtu_attr); 1124 struct mlx4_dev *mdev = info->dev; 1125 struct mlx4_priv *priv = mlx4_priv(mdev); 1126 int err, port, mtu, ibta_mtu = -1; 1127 1128 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 1129 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1130 return -EINVAL; 1131 } 1132 1133 err = kstrtoint(buf, 0, &mtu); 1134 if (!err) 1135 ibta_mtu = int_to_ibta_mtu(mtu); 1136 1137 if (err || ibta_mtu < 0) { 1138 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 1139 return -EINVAL; 1140 } 1141 1142 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 1143 1144 mlx4_stop_sense(mdev); 1145 mutex_lock(&priv->port_mutex); 1146 mlx4_unregister_device(mdev); 1147 for (port = 1; port <= mdev->caps.num_ports; port++) { 1148 mlx4_CLOSE_PORT(mdev, port); 1149 err = mlx4_SET_PORT(mdev, port, -1); 1150 if (err) { 1151 mlx4_err(mdev, "Failed to set port %d, aborting\n", 1152 port); 1153 goto err_set_port; 1154 } 1155 } 1156 err = mlx4_register_device(mdev); 1157 err_set_port: 1158 mutex_unlock(&priv->port_mutex); 1159 mlx4_start_sense(mdev); 1160 return err ? err : count; 1161 } 1162 1163 int mlx4_bond(struct mlx4_dev *dev) 1164 { 1165 int ret = 0; 1166 struct mlx4_priv *priv = mlx4_priv(dev); 1167 1168 mutex_lock(&priv->bond_mutex); 1169 1170 if (!mlx4_is_bonded(dev)) 1171 ret = mlx4_do_bond(dev, true); 1172 else 1173 ret = 0; 1174 1175 mutex_unlock(&priv->bond_mutex); 1176 if (ret) 1177 mlx4_err(dev, "Failed to bond device: %d\n", ret); 1178 else 1179 mlx4_dbg(dev, "Device is bonded\n"); 1180 return ret; 1181 } 1182 EXPORT_SYMBOL_GPL(mlx4_bond); 1183 1184 int mlx4_unbond(struct mlx4_dev *dev) 1185 { 1186 int ret = 0; 1187 struct mlx4_priv *priv = mlx4_priv(dev); 1188 1189 mutex_lock(&priv->bond_mutex); 1190 1191 if (mlx4_is_bonded(dev)) 1192 ret = mlx4_do_bond(dev, false); 1193 1194 mutex_unlock(&priv->bond_mutex); 1195 if (ret) 1196 mlx4_err(dev, "Failed to unbond device: %d\n", ret); 1197 else 1198 mlx4_dbg(dev, "Device is unbonded\n"); 1199 return ret; 1200 } 1201 EXPORT_SYMBOL_GPL(mlx4_unbond); 1202 1203 1204 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) 1205 { 1206 u8 port1 = v2p->port1; 1207 u8 port2 = v2p->port2; 1208 struct mlx4_priv *priv = mlx4_priv(dev); 1209 int err; 1210 1211 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) 1212 return -ENOTSUPP; 1213 1214 mutex_lock(&priv->bond_mutex); 1215 1216 /* zero means keep current mapping for this port */ 1217 if (port1 == 0) 1218 port1 = priv->v2p.port1; 1219 if (port2 == 0) 1220 port2 = priv->v2p.port2; 1221 1222 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || 1223 (port2 < 1) || (port2 > MLX4_MAX_PORTS) || 1224 (port1 == 2 && port2 == 1)) { 1225 /* besides boundary checks cross mapping makes 1226 * no sense and therefore not allowed */ 1227 err = -EINVAL; 1228 } else if ((port1 == priv->v2p.port1) && 1229 (port2 == priv->v2p.port2)) { 1230 err = 0; 1231 } else { 1232 err = mlx4_virt2phy_port_map(dev, port1, port2); 1233 if (!err) { 1234 mlx4_dbg(dev, "port map changed: [%d][%d]\n", 1235 port1, port2); 1236 priv->v2p.port1 = port1; 1237 priv->v2p.port2 = port2; 1238 } else { 1239 mlx4_err(dev, "Failed to change port mape: %d\n", err); 1240 } 1241 } 1242 1243 mutex_unlock(&priv->bond_mutex); 1244 return err; 1245 } 1246 EXPORT_SYMBOL_GPL(mlx4_port_map_set); 1247 1248 static int mlx4_load_fw(struct mlx4_dev *dev) 1249 { 1250 struct mlx4_priv *priv = mlx4_priv(dev); 1251 int err; 1252 1253 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 1254 GFP_HIGHUSER | __GFP_NOWARN, 0); 1255 if (!priv->fw.fw_icm) { 1256 mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); 1257 return -ENOMEM; 1258 } 1259 1260 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 1261 if (err) { 1262 mlx4_err(dev, "MAP_FA command failed, aborting\n"); 1263 goto err_free; 1264 } 1265 1266 err = mlx4_RUN_FW(dev); 1267 if (err) { 1268 mlx4_err(dev, "RUN_FW command failed, aborting\n"); 1269 goto err_unmap_fa; 1270 } 1271 1272 return 0; 1273 1274 err_unmap_fa: 1275 mlx4_UNMAP_FA(dev); 1276 1277 err_free: 1278 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1279 return err; 1280 } 1281 1282 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 1283 int cmpt_entry_sz) 1284 { 1285 struct mlx4_priv *priv = mlx4_priv(dev); 1286 int err; 1287 int num_eqs; 1288 1289 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 1290 cmpt_base + 1291 ((u64) (MLX4_CMPT_TYPE_QP * 1292 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1293 cmpt_entry_sz, dev->caps.num_qps, 1294 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1295 0, 0); 1296 if (err) 1297 goto err; 1298 1299 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 1300 cmpt_base + 1301 ((u64) (MLX4_CMPT_TYPE_SRQ * 1302 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1303 cmpt_entry_sz, dev->caps.num_srqs, 1304 dev->caps.reserved_srqs, 0, 0); 1305 if (err) 1306 goto err_qp; 1307 1308 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 1309 cmpt_base + 1310 ((u64) (MLX4_CMPT_TYPE_CQ * 1311 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1312 cmpt_entry_sz, dev->caps.num_cqs, 1313 dev->caps.reserved_cqs, 0, 0); 1314 if (err) 1315 goto err_srq; 1316 1317 num_eqs = dev->phys_caps.num_phys_eqs; 1318 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 1319 cmpt_base + 1320 ((u64) (MLX4_CMPT_TYPE_EQ * 1321 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1322 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 1323 if (err) 1324 goto err_cq; 1325 1326 return 0; 1327 1328 err_cq: 1329 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1330 1331 err_srq: 1332 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1333 1334 err_qp: 1335 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1336 1337 err: 1338 return err; 1339 } 1340 1341 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 1342 struct mlx4_init_hca_param *init_hca, u64 icm_size) 1343 { 1344 struct mlx4_priv *priv = mlx4_priv(dev); 1345 u64 aux_pages; 1346 int num_eqs; 1347 int err; 1348 1349 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1350 if (err) { 1351 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); 1352 return err; 1353 } 1354 1355 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", 1356 (unsigned long long) icm_size >> 10, 1357 (unsigned long long) aux_pages << 2); 1358 1359 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1360 GFP_HIGHUSER | __GFP_NOWARN, 0); 1361 if (!priv->fw.aux_icm) { 1362 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); 1363 return -ENOMEM; 1364 } 1365 1366 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1367 if (err) { 1368 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); 1369 goto err_free_aux; 1370 } 1371 1372 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1373 if (err) { 1374 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); 1375 goto err_unmap_aux; 1376 } 1377 1378 1379 num_eqs = dev->phys_caps.num_phys_eqs; 1380 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 1381 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1382 num_eqs, num_eqs, 0, 0); 1383 if (err) { 1384 mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); 1385 goto err_unmap_cmpt; 1386 } 1387 1388 /* 1389 * Reserved MTT entries must be aligned up to a cacheline 1390 * boundary, since the FW will write to them, while the driver 1391 * writes to all other MTT entries. (The variable 1392 * dev->caps.mtt_entry_sz below is really the MTT segment 1393 * size, not the raw entry size) 1394 */ 1395 dev->caps.reserved_mtts = 1396 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 1397 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 1398 1399 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 1400 init_hca->mtt_base, 1401 dev->caps.mtt_entry_sz, 1402 dev->caps.num_mtts, 1403 dev->caps.reserved_mtts, 1, 0); 1404 if (err) { 1405 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); 1406 goto err_unmap_eq; 1407 } 1408 1409 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 1410 init_hca->dmpt_base, 1411 dev_cap->dmpt_entry_sz, 1412 dev->caps.num_mpts, 1413 dev->caps.reserved_mrws, 1, 1); 1414 if (err) { 1415 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); 1416 goto err_unmap_mtt; 1417 } 1418 1419 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1420 init_hca->qpc_base, 1421 dev_cap->qpc_entry_sz, 1422 dev->caps.num_qps, 1423 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1424 0, 0); 1425 if (err) { 1426 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); 1427 goto err_unmap_dmpt; 1428 } 1429 1430 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1431 init_hca->auxc_base, 1432 dev_cap->aux_entry_sz, 1433 dev->caps.num_qps, 1434 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1435 0, 0); 1436 if (err) { 1437 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); 1438 goto err_unmap_qp; 1439 } 1440 1441 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1442 init_hca->altc_base, 1443 dev_cap->altc_entry_sz, 1444 dev->caps.num_qps, 1445 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1446 0, 0); 1447 if (err) { 1448 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); 1449 goto err_unmap_auxc; 1450 } 1451 1452 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1453 init_hca->rdmarc_base, 1454 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1455 dev->caps.num_qps, 1456 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1457 0, 0); 1458 if (err) { 1459 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1460 goto err_unmap_altc; 1461 } 1462 1463 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1464 init_hca->cqc_base, 1465 dev_cap->cqc_entry_sz, 1466 dev->caps.num_cqs, 1467 dev->caps.reserved_cqs, 0, 0); 1468 if (err) { 1469 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); 1470 goto err_unmap_rdmarc; 1471 } 1472 1473 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1474 init_hca->srqc_base, 1475 dev_cap->srq_entry_sz, 1476 dev->caps.num_srqs, 1477 dev->caps.reserved_srqs, 0, 0); 1478 if (err) { 1479 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); 1480 goto err_unmap_cq; 1481 } 1482 1483 /* 1484 * For flow steering device managed mode it is required to use 1485 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1486 * required, but for simplicity just map the whole multicast 1487 * group table now. The table isn't very big and it's a lot 1488 * easier than trying to track ref counts. 1489 */ 1490 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1491 init_hca->mc_base, 1492 mlx4_get_mgm_entry_size(dev), 1493 dev->caps.num_mgms + dev->caps.num_amgms, 1494 dev->caps.num_mgms + dev->caps.num_amgms, 1495 0, 0); 1496 if (err) { 1497 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); 1498 goto err_unmap_srq; 1499 } 1500 1501 return 0; 1502 1503 err_unmap_srq: 1504 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1505 1506 err_unmap_cq: 1507 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1508 1509 err_unmap_rdmarc: 1510 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1511 1512 err_unmap_altc: 1513 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1514 1515 err_unmap_auxc: 1516 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1517 1518 err_unmap_qp: 1519 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1520 1521 err_unmap_dmpt: 1522 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1523 1524 err_unmap_mtt: 1525 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1526 1527 err_unmap_eq: 1528 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1529 1530 err_unmap_cmpt: 1531 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1532 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1533 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1534 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1535 1536 err_unmap_aux: 1537 mlx4_UNMAP_ICM_AUX(dev); 1538 1539 err_free_aux: 1540 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1541 1542 return err; 1543 } 1544 1545 static void mlx4_free_icms(struct mlx4_dev *dev) 1546 { 1547 struct mlx4_priv *priv = mlx4_priv(dev); 1548 1549 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1550 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1551 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1552 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1553 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1554 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1555 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1556 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1557 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1558 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1559 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1560 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1561 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1562 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1563 1564 mlx4_UNMAP_ICM_AUX(dev); 1565 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1566 } 1567 1568 static void mlx4_slave_exit(struct mlx4_dev *dev) 1569 { 1570 struct mlx4_priv *priv = mlx4_priv(dev); 1571 1572 mutex_lock(&priv->cmd.slave_cmd_mutex); 1573 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 1574 MLX4_COMM_TIME)) 1575 mlx4_warn(dev, "Failed to close slave function\n"); 1576 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1577 } 1578 1579 static int map_bf_area(struct mlx4_dev *dev) 1580 { 1581 struct mlx4_priv *priv = mlx4_priv(dev); 1582 resource_size_t bf_start; 1583 resource_size_t bf_len; 1584 int err = 0; 1585 1586 if (!dev->caps.bf_reg_size) 1587 return -ENXIO; 1588 1589 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1590 (dev->caps.num_uars << PAGE_SHIFT); 1591 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1592 (dev->caps.num_uars << PAGE_SHIFT); 1593 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1594 if (!priv->bf_mapping) 1595 err = -ENOMEM; 1596 1597 return err; 1598 } 1599 1600 static void unmap_bf_area(struct mlx4_dev *dev) 1601 { 1602 if (mlx4_priv(dev)->bf_mapping) 1603 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1604 } 1605 1606 cycle_t mlx4_read_clock(struct mlx4_dev *dev) 1607 { 1608 u32 clockhi, clocklo, clockhi1; 1609 cycle_t cycles; 1610 int i; 1611 struct mlx4_priv *priv = mlx4_priv(dev); 1612 1613 for (i = 0; i < 10; i++) { 1614 clockhi = swab32(readl(priv->clock_mapping)); 1615 clocklo = swab32(readl(priv->clock_mapping + 4)); 1616 clockhi1 = swab32(readl(priv->clock_mapping)); 1617 if (clockhi == clockhi1) 1618 break; 1619 } 1620 1621 cycles = (u64) clockhi << 32 | (u64) clocklo; 1622 1623 return cycles; 1624 } 1625 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1626 1627 1628 static int map_internal_clock(struct mlx4_dev *dev) 1629 { 1630 struct mlx4_priv *priv = mlx4_priv(dev); 1631 1632 priv->clock_mapping = 1633 ioremap(pci_resource_start(dev->persist->pdev, 1634 priv->fw.clock_bar) + 1635 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1636 1637 if (!priv->clock_mapping) 1638 return -ENOMEM; 1639 1640 return 0; 1641 } 1642 1643 static void unmap_internal_clock(struct mlx4_dev *dev) 1644 { 1645 struct mlx4_priv *priv = mlx4_priv(dev); 1646 1647 if (priv->clock_mapping) 1648 iounmap(priv->clock_mapping); 1649 } 1650 1651 static void mlx4_close_hca(struct mlx4_dev *dev) 1652 { 1653 unmap_internal_clock(dev); 1654 unmap_bf_area(dev); 1655 if (mlx4_is_slave(dev)) 1656 mlx4_slave_exit(dev); 1657 else { 1658 mlx4_CLOSE_HCA(dev, 0); 1659 mlx4_free_icms(dev); 1660 } 1661 } 1662 1663 static void mlx4_close_fw(struct mlx4_dev *dev) 1664 { 1665 if (!mlx4_is_slave(dev)) { 1666 mlx4_UNMAP_FA(dev); 1667 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1668 } 1669 } 1670 1671 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1672 { 1673 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1674 1675 u32 comm_flags; 1676 u32 offline_bit; 1677 unsigned long end; 1678 struct mlx4_priv *priv = mlx4_priv(dev); 1679 1680 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1681 while (time_before(jiffies, end)) { 1682 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1683 MLX4_COMM_CHAN_FLAGS)); 1684 offline_bit = (comm_flags & 1685 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1686 if (!offline_bit) 1687 return 0; 1688 /* There are cases as part of AER/Reset flow that PF needs 1689 * around 100 msec to load. We therefore sleep for 100 msec 1690 * to allow other tasks to make use of that CPU during this 1691 * time interval. 1692 */ 1693 msleep(100); 1694 } 1695 mlx4_err(dev, "Communication channel is offline.\n"); 1696 return -EIO; 1697 } 1698 1699 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1700 { 1701 #define COMM_CHAN_RST_OFFSET 0x1e 1702 1703 struct mlx4_priv *priv = mlx4_priv(dev); 1704 u32 comm_rst; 1705 u32 comm_caps; 1706 1707 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1708 MLX4_COMM_CHAN_CAPS)); 1709 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1710 1711 if (comm_rst) 1712 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1713 } 1714 1715 static int mlx4_init_slave(struct mlx4_dev *dev) 1716 { 1717 struct mlx4_priv *priv = mlx4_priv(dev); 1718 u64 dma = (u64) priv->mfunc.vhcr_dma; 1719 int ret_from_reset = 0; 1720 u32 slave_read; 1721 u32 cmd_channel_ver; 1722 1723 if (atomic_read(&pf_loading)) { 1724 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1725 return -EPROBE_DEFER; 1726 } 1727 1728 mutex_lock(&priv->cmd.slave_cmd_mutex); 1729 priv->cmd.max_cmds = 1; 1730 if (mlx4_comm_check_offline(dev)) { 1731 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1732 goto err_offline; 1733 } 1734 1735 mlx4_reset_vf_support(dev); 1736 mlx4_warn(dev, "Sending reset\n"); 1737 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1738 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1739 /* if we are in the middle of flr the slave will try 1740 * NUM_OF_RESET_RETRIES times before leaving.*/ 1741 if (ret_from_reset) { 1742 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1743 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1744 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1745 return -EPROBE_DEFER; 1746 } else 1747 goto err; 1748 } 1749 1750 /* check the driver version - the slave I/F revision 1751 * must match the master's */ 1752 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1753 cmd_channel_ver = mlx4_comm_get_version(); 1754 1755 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1756 MLX4_COMM_GET_IF_REV(slave_read)) { 1757 mlx4_err(dev, "slave driver version is not supported by the master\n"); 1758 goto err; 1759 } 1760 1761 mlx4_warn(dev, "Sending vhcr0\n"); 1762 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1763 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1764 goto err; 1765 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1766 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1767 goto err; 1768 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1769 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1770 goto err; 1771 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 1772 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1773 goto err; 1774 1775 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1776 return 0; 1777 1778 err: 1779 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 1780 err_offline: 1781 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1782 return -EIO; 1783 } 1784 1785 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 1786 { 1787 int i; 1788 1789 for (i = 1; i <= dev->caps.num_ports; i++) { 1790 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 1791 dev->caps.gid_table_len[i] = 1792 mlx4_get_slave_num_gids(dev, 0, i); 1793 else 1794 dev->caps.gid_table_len[i] = 1; 1795 dev->caps.pkey_table_len[i] = 1796 dev->phys_caps.pkey_phys_table_len[i] - 1; 1797 } 1798 } 1799 1800 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 1801 { 1802 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 1803 1804 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 1805 i++) { 1806 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 1807 break; 1808 } 1809 1810 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 1811 } 1812 1813 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 1814 { 1815 switch (dmfs_high_steer_mode) { 1816 case MLX4_STEERING_DMFS_A0_DEFAULT: 1817 return "default performance"; 1818 1819 case MLX4_STEERING_DMFS_A0_DYNAMIC: 1820 return "dynamic hybrid mode"; 1821 1822 case MLX4_STEERING_DMFS_A0_STATIC: 1823 return "performance optimized for limited rule configuration (static)"; 1824 1825 case MLX4_STEERING_DMFS_A0_DISABLE: 1826 return "disabled performance optimized steering"; 1827 1828 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 1829 return "performance optimized steering not supported"; 1830 1831 default: 1832 return "Unrecognized mode"; 1833 } 1834 } 1835 1836 #define MLX4_DMFS_A0_STEERING (1UL << 2) 1837 1838 static void choose_steering_mode(struct mlx4_dev *dev, 1839 struct mlx4_dev_cap *dev_cap) 1840 { 1841 if (mlx4_log_num_mgm_entry_size <= 0) { 1842 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 1843 if (dev->caps.dmfs_high_steer_mode == 1844 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1845 mlx4_err(dev, "DMFS high rate mode not supported\n"); 1846 else 1847 dev->caps.dmfs_high_steer_mode = 1848 MLX4_STEERING_DMFS_A0_STATIC; 1849 } 1850 } 1851 1852 if (mlx4_log_num_mgm_entry_size <= 0 && 1853 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1854 (!mlx4_is_mfunc(dev) || 1855 (dev_cap->fs_max_num_qp_per_entry >= 1856 (dev->persist->num_vfs + 1))) && 1857 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1858 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1859 dev->oper_log_mgm_entry_size = 1860 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 1861 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 1862 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 1863 dev->caps.fs_log_max_ucast_qp_range_size = 1864 dev_cap->fs_log_max_ucast_qp_range_size; 1865 } else { 1866 if (dev->caps.dmfs_high_steer_mode != 1867 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1868 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 1869 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 1870 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1871 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 1872 else { 1873 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 1874 1875 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1876 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1877 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 1878 } 1879 dev->oper_log_mgm_entry_size = 1880 mlx4_log_num_mgm_entry_size > 0 ? 1881 mlx4_log_num_mgm_entry_size : 1882 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1883 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1884 } 1885 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 1886 mlx4_steering_mode_str(dev->caps.steering_mode), 1887 dev->oper_log_mgm_entry_size, 1888 mlx4_log_num_mgm_entry_size); 1889 } 1890 1891 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 1892 struct mlx4_dev_cap *dev_cap) 1893 { 1894 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 1895 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 1896 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 1897 else 1898 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 1899 1900 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 1901 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 1902 } 1903 1904 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 1905 { 1906 int i; 1907 struct mlx4_port_cap port_cap; 1908 1909 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1910 return -EINVAL; 1911 1912 for (i = 1; i <= dev->caps.num_ports; i++) { 1913 if (mlx4_dev_port(dev, i, &port_cap)) { 1914 mlx4_err(dev, 1915 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 1916 } else if ((dev->caps.dmfs_high_steer_mode != 1917 MLX4_STEERING_DMFS_A0_DEFAULT) && 1918 (port_cap.dmfs_optimized_state == 1919 !!(dev->caps.dmfs_high_steer_mode == 1920 MLX4_STEERING_DMFS_A0_DISABLE))) { 1921 mlx4_err(dev, 1922 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 1923 dmfs_high_rate_steering_mode_str( 1924 dev->caps.dmfs_high_steer_mode), 1925 (port_cap.dmfs_optimized_state ? 1926 "enabled" : "disabled")); 1927 } 1928 } 1929 1930 return 0; 1931 } 1932 1933 static int mlx4_init_fw(struct mlx4_dev *dev) 1934 { 1935 struct mlx4_mod_stat_cfg mlx4_cfg; 1936 int err = 0; 1937 1938 if (!mlx4_is_slave(dev)) { 1939 err = mlx4_QUERY_FW(dev); 1940 if (err) { 1941 if (err == -EACCES) 1942 mlx4_info(dev, "non-primary physical function, skipping\n"); 1943 else 1944 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 1945 return err; 1946 } 1947 1948 err = mlx4_load_fw(dev); 1949 if (err) { 1950 mlx4_err(dev, "Failed to start FW, aborting\n"); 1951 return err; 1952 } 1953 1954 mlx4_cfg.log_pg_sz_m = 1; 1955 mlx4_cfg.log_pg_sz = 0; 1956 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1957 if (err) 1958 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1959 } 1960 1961 return err; 1962 } 1963 1964 static int mlx4_init_hca(struct mlx4_dev *dev) 1965 { 1966 struct mlx4_priv *priv = mlx4_priv(dev); 1967 struct mlx4_adapter adapter; 1968 struct mlx4_dev_cap dev_cap; 1969 struct mlx4_profile profile; 1970 struct mlx4_init_hca_param init_hca; 1971 u64 icm_size; 1972 struct mlx4_config_dev_params params; 1973 int err; 1974 1975 if (!mlx4_is_slave(dev)) { 1976 err = mlx4_dev_cap(dev, &dev_cap); 1977 if (err) { 1978 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 1979 return err; 1980 } 1981 1982 choose_steering_mode(dev, &dev_cap); 1983 choose_tunnel_offload_mode(dev, &dev_cap); 1984 1985 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 1986 mlx4_is_master(dev)) 1987 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 1988 1989 err = mlx4_get_phys_port_id(dev); 1990 if (err) 1991 mlx4_err(dev, "Fail to get physical port id\n"); 1992 1993 if (mlx4_is_master(dev)) 1994 mlx4_parav_master_pf_caps(dev); 1995 1996 if (mlx4_low_memory_profile()) { 1997 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n"); 1998 profile = low_mem_profile; 1999 } else { 2000 profile = default_profile; 2001 } 2002 if (dev->caps.steering_mode == 2003 MLX4_STEERING_MODE_DEVICE_MANAGED) 2004 profile.num_mcg = MLX4_FS_NUM_MCG; 2005 2006 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2007 &init_hca); 2008 if ((long long) icm_size < 0) { 2009 err = icm_size; 2010 return err; 2011 } 2012 2013 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2014 2015 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2016 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2017 init_hca.mw_enabled = 0; 2018 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2019 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2020 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2021 2022 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2023 if (err) 2024 return err; 2025 2026 err = mlx4_INIT_HCA(dev, &init_hca); 2027 if (err) { 2028 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2029 goto err_free_icm; 2030 } 2031 2032 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2033 err = mlx4_query_func(dev, &dev_cap); 2034 if (err < 0) { 2035 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2036 goto err_close; 2037 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2038 dev->caps.num_eqs = dev_cap.max_eqs; 2039 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2040 dev->caps.reserved_uars = dev_cap.reserved_uars; 2041 } 2042 } 2043 2044 /* 2045 * If TS is supported by FW 2046 * read HCA frequency by QUERY_HCA command 2047 */ 2048 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2049 memset(&init_hca, 0, sizeof(init_hca)); 2050 err = mlx4_QUERY_HCA(dev, &init_hca); 2051 if (err) { 2052 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2053 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2054 } else { 2055 dev->caps.hca_core_clock = 2056 init_hca.hca_core_clock; 2057 } 2058 2059 /* In case we got HCA frequency 0 - disable timestamping 2060 * to avoid dividing by zero 2061 */ 2062 if (!dev->caps.hca_core_clock) { 2063 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2064 mlx4_err(dev, 2065 "HCA frequency is 0 - timestamping is not supported\n"); 2066 } else if (map_internal_clock(dev)) { 2067 /* 2068 * Map internal clock, 2069 * in case of failure disable timestamping 2070 */ 2071 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2072 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); 2073 } 2074 } 2075 2076 if (dev->caps.dmfs_high_steer_mode != 2077 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2078 if (mlx4_validate_optimized_steering(dev)) 2079 mlx4_warn(dev, "Optimized steering validation failed\n"); 2080 2081 if (dev->caps.dmfs_high_steer_mode == 2082 MLX4_STEERING_DMFS_A0_DISABLE) { 2083 dev->caps.dmfs_high_rate_qpn_base = 2084 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2085 dev->caps.dmfs_high_rate_qpn_range = 2086 MLX4_A0_STEERING_TABLE_SIZE; 2087 } 2088 2089 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2090 dmfs_high_rate_steering_mode_str( 2091 dev->caps.dmfs_high_steer_mode)); 2092 } 2093 } else { 2094 err = mlx4_init_slave(dev); 2095 if (err) { 2096 if (err != -EPROBE_DEFER) 2097 mlx4_err(dev, "Failed to initialize slave\n"); 2098 return err; 2099 } 2100 2101 err = mlx4_slave_cap(dev); 2102 if (err) { 2103 mlx4_err(dev, "Failed to obtain slave caps\n"); 2104 goto err_close; 2105 } 2106 } 2107 2108 if (map_bf_area(dev)) 2109 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2110 2111 /*Only the master set the ports, all the rest got it from it.*/ 2112 if (!mlx4_is_slave(dev)) 2113 mlx4_set_port_mask(dev); 2114 2115 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2116 if (err) { 2117 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2118 goto unmap_bf; 2119 } 2120 2121 /* Query CONFIG_DEV parameters */ 2122 err = mlx4_config_dev_retrieval(dev, ¶ms); 2123 if (err && err != -ENOTSUPP) { 2124 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2125 } else if (!err) { 2126 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2127 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2128 } 2129 priv->eq_table.inta_pin = adapter.inta_pin; 2130 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2131 2132 return 0; 2133 2134 unmap_bf: 2135 unmap_internal_clock(dev); 2136 unmap_bf_area(dev); 2137 2138 if (mlx4_is_slave(dev)) { 2139 kfree(dev->caps.qp0_qkey); 2140 kfree(dev->caps.qp0_tunnel); 2141 kfree(dev->caps.qp0_proxy); 2142 kfree(dev->caps.qp1_tunnel); 2143 kfree(dev->caps.qp1_proxy); 2144 } 2145 2146 err_close: 2147 if (mlx4_is_slave(dev)) 2148 mlx4_slave_exit(dev); 2149 else 2150 mlx4_CLOSE_HCA(dev, 0); 2151 2152 err_free_icm: 2153 if (!mlx4_is_slave(dev)) 2154 mlx4_free_icms(dev); 2155 2156 return err; 2157 } 2158 2159 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2160 { 2161 struct mlx4_priv *priv = mlx4_priv(dev); 2162 int nent; 2163 2164 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2165 return -ENOENT; 2166 2167 nent = dev->caps.max_counters; 2168 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 2169 } 2170 2171 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2172 { 2173 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2174 } 2175 2176 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2177 { 2178 struct mlx4_priv *priv = mlx4_priv(dev); 2179 2180 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2181 return -ENOENT; 2182 2183 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2184 if (*idx == -1) 2185 return -ENOMEM; 2186 2187 return 0; 2188 } 2189 2190 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2191 { 2192 u64 out_param; 2193 int err; 2194 2195 if (mlx4_is_mfunc(dev)) { 2196 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2197 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2198 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2199 if (!err) 2200 *idx = get_param_l(&out_param); 2201 2202 return err; 2203 } 2204 return __mlx4_counter_alloc(dev, idx); 2205 } 2206 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2207 2208 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2209 { 2210 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2211 return; 2212 } 2213 2214 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2215 { 2216 u64 in_param = 0; 2217 2218 if (mlx4_is_mfunc(dev)) { 2219 set_param_l(&in_param, idx); 2220 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2221 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2222 MLX4_CMD_WRAPPED); 2223 return; 2224 } 2225 __mlx4_counter_free(dev, idx); 2226 } 2227 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2228 2229 static int mlx4_setup_hca(struct mlx4_dev *dev) 2230 { 2231 struct mlx4_priv *priv = mlx4_priv(dev); 2232 int err; 2233 int port; 2234 __be32 ib_port_default_caps; 2235 2236 err = mlx4_init_uar_table(dev); 2237 if (err) { 2238 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2239 return err; 2240 } 2241 2242 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2243 if (err) { 2244 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2245 goto err_uar_table_free; 2246 } 2247 2248 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2249 if (!priv->kar) { 2250 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2251 err = -ENOMEM; 2252 goto err_uar_free; 2253 } 2254 2255 err = mlx4_init_pd_table(dev); 2256 if (err) { 2257 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2258 goto err_kar_unmap; 2259 } 2260 2261 err = mlx4_init_xrcd_table(dev); 2262 if (err) { 2263 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2264 goto err_pd_table_free; 2265 } 2266 2267 err = mlx4_init_mr_table(dev); 2268 if (err) { 2269 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2270 goto err_xrcd_table_free; 2271 } 2272 2273 if (!mlx4_is_slave(dev)) { 2274 err = mlx4_init_mcg_table(dev); 2275 if (err) { 2276 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2277 goto err_mr_table_free; 2278 } 2279 err = mlx4_config_mad_demux(dev); 2280 if (err) { 2281 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2282 goto err_mcg_table_free; 2283 } 2284 } 2285 2286 err = mlx4_init_eq_table(dev); 2287 if (err) { 2288 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2289 goto err_mcg_table_free; 2290 } 2291 2292 err = mlx4_cmd_use_events(dev); 2293 if (err) { 2294 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2295 goto err_eq_table_free; 2296 } 2297 2298 err = mlx4_NOP(dev); 2299 if (err) { 2300 if (dev->flags & MLX4_FLAG_MSI_X) { 2301 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2302 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2303 mlx4_warn(dev, "Trying again without MSI-X\n"); 2304 } else { 2305 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2306 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2307 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2308 } 2309 2310 goto err_cmd_poll; 2311 } 2312 2313 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2314 2315 err = mlx4_init_cq_table(dev); 2316 if (err) { 2317 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2318 goto err_cmd_poll; 2319 } 2320 2321 err = mlx4_init_srq_table(dev); 2322 if (err) { 2323 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2324 goto err_cq_table_free; 2325 } 2326 2327 err = mlx4_init_qp_table(dev); 2328 if (err) { 2329 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2330 goto err_srq_table_free; 2331 } 2332 2333 err = mlx4_init_counters_table(dev); 2334 if (err && err != -ENOENT) { 2335 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2336 goto err_qp_table_free; 2337 } 2338 2339 if (!mlx4_is_slave(dev)) { 2340 for (port = 1; port <= dev->caps.num_ports; port++) { 2341 ib_port_default_caps = 0; 2342 err = mlx4_get_port_ib_caps(dev, port, 2343 &ib_port_default_caps); 2344 if (err) 2345 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2346 port, err); 2347 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2348 2349 /* initialize per-slave default ib port capabilities */ 2350 if (mlx4_is_master(dev)) { 2351 int i; 2352 for (i = 0; i < dev->num_slaves; i++) { 2353 if (i == mlx4_master_func_num(dev)) 2354 continue; 2355 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2356 ib_port_default_caps; 2357 } 2358 } 2359 2360 if (mlx4_is_mfunc(dev)) 2361 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2362 else 2363 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2364 2365 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2366 dev->caps.pkey_table_len[port] : -1); 2367 if (err) { 2368 mlx4_err(dev, "Failed to set port %d, aborting\n", 2369 port); 2370 goto err_counters_table_free; 2371 } 2372 } 2373 } 2374 2375 return 0; 2376 2377 err_counters_table_free: 2378 mlx4_cleanup_counters_table(dev); 2379 2380 err_qp_table_free: 2381 mlx4_cleanup_qp_table(dev); 2382 2383 err_srq_table_free: 2384 mlx4_cleanup_srq_table(dev); 2385 2386 err_cq_table_free: 2387 mlx4_cleanup_cq_table(dev); 2388 2389 err_cmd_poll: 2390 mlx4_cmd_use_polling(dev); 2391 2392 err_eq_table_free: 2393 mlx4_cleanup_eq_table(dev); 2394 2395 err_mcg_table_free: 2396 if (!mlx4_is_slave(dev)) 2397 mlx4_cleanup_mcg_table(dev); 2398 2399 err_mr_table_free: 2400 mlx4_cleanup_mr_table(dev); 2401 2402 err_xrcd_table_free: 2403 mlx4_cleanup_xrcd_table(dev); 2404 2405 err_pd_table_free: 2406 mlx4_cleanup_pd_table(dev); 2407 2408 err_kar_unmap: 2409 iounmap(priv->kar); 2410 2411 err_uar_free: 2412 mlx4_uar_free(dev, &priv->driver_uar); 2413 2414 err_uar_table_free: 2415 mlx4_cleanup_uar_table(dev); 2416 return err; 2417 } 2418 2419 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2420 { 2421 struct mlx4_priv *priv = mlx4_priv(dev); 2422 struct msix_entry *entries; 2423 int i; 2424 2425 if (msi_x) { 2426 int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; 2427 2428 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2429 nreq); 2430 2431 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2432 if (!entries) 2433 goto no_msi; 2434 2435 for (i = 0; i < nreq; ++i) 2436 entries[i].entry = i; 2437 2438 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2439 nreq); 2440 2441 if (nreq < 0) { 2442 kfree(entries); 2443 goto no_msi; 2444 } else if (nreq < MSIX_LEGACY_SZ + 2445 dev->caps.num_ports * MIN_MSIX_P_PORT) { 2446 /*Working in legacy mode , all EQ's shared*/ 2447 dev->caps.comp_pool = 0; 2448 dev->caps.num_comp_vectors = nreq - 1; 2449 } else { 2450 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 2451 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 2452 } 2453 for (i = 0; i < nreq; ++i) 2454 priv->eq_table.eq[i].irq = entries[i].vector; 2455 2456 dev->flags |= MLX4_FLAG_MSI_X; 2457 2458 kfree(entries); 2459 return; 2460 } 2461 2462 no_msi: 2463 dev->caps.num_comp_vectors = 1; 2464 dev->caps.comp_pool = 0; 2465 2466 for (i = 0; i < 2; ++i) 2467 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2468 } 2469 2470 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2471 { 2472 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2473 int err = 0; 2474 2475 info->dev = dev; 2476 info->port = port; 2477 if (!mlx4_is_slave(dev)) { 2478 mlx4_init_mac_table(dev, &info->mac_table); 2479 mlx4_init_vlan_table(dev, &info->vlan_table); 2480 mlx4_init_roce_gid_table(dev, &info->gid_table); 2481 info->base_qpn = mlx4_get_base_qpn(dev, port); 2482 } 2483 2484 sprintf(info->dev_name, "mlx4_port%d", port); 2485 info->port_attr.attr.name = info->dev_name; 2486 if (mlx4_is_mfunc(dev)) 2487 info->port_attr.attr.mode = S_IRUGO; 2488 else { 2489 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2490 info->port_attr.store = set_port_type; 2491 } 2492 info->port_attr.show = show_port_type; 2493 sysfs_attr_init(&info->port_attr.attr); 2494 2495 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2496 if (err) { 2497 mlx4_err(dev, "Failed to create file for port %d\n", port); 2498 info->port = -1; 2499 } 2500 2501 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2502 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2503 if (mlx4_is_mfunc(dev)) 2504 info->port_mtu_attr.attr.mode = S_IRUGO; 2505 else { 2506 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2507 info->port_mtu_attr.store = set_port_ib_mtu; 2508 } 2509 info->port_mtu_attr.show = show_port_ib_mtu; 2510 sysfs_attr_init(&info->port_mtu_attr.attr); 2511 2512 err = device_create_file(&dev->persist->pdev->dev, 2513 &info->port_mtu_attr); 2514 if (err) { 2515 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2516 device_remove_file(&info->dev->persist->pdev->dev, 2517 &info->port_attr); 2518 info->port = -1; 2519 } 2520 2521 return err; 2522 } 2523 2524 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2525 { 2526 if (info->port < 0) 2527 return; 2528 2529 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2530 device_remove_file(&info->dev->persist->pdev->dev, 2531 &info->port_mtu_attr); 2532 } 2533 2534 static int mlx4_init_steering(struct mlx4_dev *dev) 2535 { 2536 struct mlx4_priv *priv = mlx4_priv(dev); 2537 int num_entries = dev->caps.num_ports; 2538 int i, j; 2539 2540 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2541 if (!priv->steer) 2542 return -ENOMEM; 2543 2544 for (i = 0; i < num_entries; i++) 2545 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2546 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2547 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2548 } 2549 return 0; 2550 } 2551 2552 static void mlx4_clear_steering(struct mlx4_dev *dev) 2553 { 2554 struct mlx4_priv *priv = mlx4_priv(dev); 2555 struct mlx4_steer_index *entry, *tmp_entry; 2556 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2557 int num_entries = dev->caps.num_ports; 2558 int i, j; 2559 2560 for (i = 0; i < num_entries; i++) { 2561 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2562 list_for_each_entry_safe(pqp, tmp_pqp, 2563 &priv->steer[i].promisc_qps[j], 2564 list) { 2565 list_del(&pqp->list); 2566 kfree(pqp); 2567 } 2568 list_for_each_entry_safe(entry, tmp_entry, 2569 &priv->steer[i].steer_entries[j], 2570 list) { 2571 list_del(&entry->list); 2572 list_for_each_entry_safe(pqp, tmp_pqp, 2573 &entry->duplicates, 2574 list) { 2575 list_del(&pqp->list); 2576 kfree(pqp); 2577 } 2578 kfree(entry); 2579 } 2580 } 2581 } 2582 kfree(priv->steer); 2583 } 2584 2585 static int extended_func_num(struct pci_dev *pdev) 2586 { 2587 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 2588 } 2589 2590 #define MLX4_OWNER_BASE 0x8069c 2591 #define MLX4_OWNER_SIZE 4 2592 2593 static int mlx4_get_ownership(struct mlx4_dev *dev) 2594 { 2595 void __iomem *owner; 2596 u32 ret; 2597 2598 if (pci_channel_offline(dev->persist->pdev)) 2599 return -EIO; 2600 2601 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2602 MLX4_OWNER_BASE, 2603 MLX4_OWNER_SIZE); 2604 if (!owner) { 2605 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2606 return -ENOMEM; 2607 } 2608 2609 ret = readl(owner); 2610 iounmap(owner); 2611 return (int) !!ret; 2612 } 2613 2614 static void mlx4_free_ownership(struct mlx4_dev *dev) 2615 { 2616 void __iomem *owner; 2617 2618 if (pci_channel_offline(dev->persist->pdev)) 2619 return; 2620 2621 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2622 MLX4_OWNER_BASE, 2623 MLX4_OWNER_SIZE); 2624 if (!owner) { 2625 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2626 return; 2627 } 2628 writel(0, owner); 2629 msleep(1000); 2630 iounmap(owner); 2631 } 2632 2633 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 2634 !!((flags) & MLX4_FLAG_MASTER)) 2635 2636 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2637 u8 total_vfs, int existing_vfs, int reset_flow) 2638 { 2639 u64 dev_flags = dev->flags; 2640 int err = 0; 2641 2642 if (reset_flow) { 2643 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 2644 GFP_KERNEL); 2645 if (!dev->dev_vfs) 2646 goto free_mem; 2647 return dev_flags; 2648 } 2649 2650 atomic_inc(&pf_loading); 2651 if (dev->flags & MLX4_FLAG_SRIOV) { 2652 if (existing_vfs != total_vfs) { 2653 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 2654 existing_vfs, total_vfs); 2655 total_vfs = existing_vfs; 2656 } 2657 } 2658 2659 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 2660 if (NULL == dev->dev_vfs) { 2661 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2662 goto disable_sriov; 2663 } 2664 2665 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 2666 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 2667 err = pci_enable_sriov(pdev, total_vfs); 2668 } 2669 if (err) { 2670 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 2671 err); 2672 goto disable_sriov; 2673 } else { 2674 mlx4_warn(dev, "Running in master mode\n"); 2675 dev_flags |= MLX4_FLAG_SRIOV | 2676 MLX4_FLAG_MASTER; 2677 dev_flags &= ~MLX4_FLAG_SLAVE; 2678 dev->persist->num_vfs = total_vfs; 2679 } 2680 return dev_flags; 2681 2682 disable_sriov: 2683 atomic_dec(&pf_loading); 2684 free_mem: 2685 dev->persist->num_vfs = 0; 2686 kfree(dev->dev_vfs); 2687 return dev_flags & ~MLX4_FLAG_MASTER; 2688 } 2689 2690 enum { 2691 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 2692 }; 2693 2694 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 2695 int *nvfs) 2696 { 2697 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 2698 /* Checking for 64 VFs as a limitation of CX2 */ 2699 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 2700 requested_vfs >= 64) { 2701 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 2702 requested_vfs); 2703 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 2704 } 2705 return 0; 2706 } 2707 2708 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2709 int total_vfs, int *nvfs, struct mlx4_priv *priv, 2710 int reset_flow) 2711 { 2712 struct mlx4_dev *dev; 2713 unsigned sum = 0; 2714 int err; 2715 int port; 2716 int i; 2717 struct mlx4_dev_cap *dev_cap = NULL; 2718 int existing_vfs = 0; 2719 2720 dev = &priv->dev; 2721 2722 INIT_LIST_HEAD(&priv->ctx_list); 2723 spin_lock_init(&priv->ctx_lock); 2724 2725 mutex_init(&priv->port_mutex); 2726 mutex_init(&priv->bond_mutex); 2727 2728 INIT_LIST_HEAD(&priv->pgdir_list); 2729 mutex_init(&priv->pgdir_mutex); 2730 2731 INIT_LIST_HEAD(&priv->bf_list); 2732 mutex_init(&priv->bf_mutex); 2733 2734 dev->rev_id = pdev->revision; 2735 dev->numa_node = dev_to_node(&pdev->dev); 2736 2737 /* Detect if this device is a virtual function */ 2738 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2739 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2740 dev->flags |= MLX4_FLAG_SLAVE; 2741 } else { 2742 /* We reset the device and enable SRIOV only for physical 2743 * devices. Try to claim ownership on the device; 2744 * if already taken, skip -- do not allow multiple PFs */ 2745 err = mlx4_get_ownership(dev); 2746 if (err) { 2747 if (err < 0) 2748 return err; 2749 else { 2750 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 2751 return -EINVAL; 2752 } 2753 } 2754 2755 atomic_set(&priv->opreq_count, 0); 2756 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 2757 2758 /* 2759 * Now reset the HCA before we touch the PCI capabilities or 2760 * attempt a firmware command, since a boot ROM may have left 2761 * the HCA in an undefined state. 2762 */ 2763 err = mlx4_reset(dev); 2764 if (err) { 2765 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 2766 goto err_sriov; 2767 } 2768 2769 if (total_vfs) { 2770 dev->flags = MLX4_FLAG_MASTER; 2771 existing_vfs = pci_num_vf(pdev); 2772 if (existing_vfs) 2773 dev->flags |= MLX4_FLAG_SRIOV; 2774 dev->persist->num_vfs = total_vfs; 2775 } 2776 } 2777 2778 /* on load remove any previous indication of internal error, 2779 * device is up. 2780 */ 2781 dev->persist->state = MLX4_DEVICE_STATE_UP; 2782 2783 slave_start: 2784 err = mlx4_cmd_init(dev); 2785 if (err) { 2786 mlx4_err(dev, "Failed to init command interface, aborting\n"); 2787 goto err_sriov; 2788 } 2789 2790 /* In slave functions, the communication channel must be initialized 2791 * before posting commands. Also, init num_slaves before calling 2792 * mlx4_init_hca */ 2793 if (mlx4_is_mfunc(dev)) { 2794 if (mlx4_is_master(dev)) { 2795 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 2796 2797 } else { 2798 dev->num_slaves = 0; 2799 err = mlx4_multi_func_init(dev); 2800 if (err) { 2801 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 2802 goto err_cmd; 2803 } 2804 } 2805 } 2806 2807 err = mlx4_init_fw(dev); 2808 if (err) { 2809 mlx4_err(dev, "Failed to init fw, aborting.\n"); 2810 goto err_mfunc; 2811 } 2812 2813 if (mlx4_is_master(dev)) { 2814 /* when we hit the goto slave_start below, dev_cap already initialized */ 2815 if (!dev_cap) { 2816 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 2817 2818 if (!dev_cap) { 2819 err = -ENOMEM; 2820 goto err_fw; 2821 } 2822 2823 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2824 if (err) { 2825 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2826 goto err_fw; 2827 } 2828 2829 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2830 goto err_fw; 2831 2832 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2833 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 2834 total_vfs, 2835 existing_vfs, 2836 reset_flow); 2837 2838 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2839 dev->flags = dev_flags; 2840 if (!SRIOV_VALID_STATE(dev->flags)) { 2841 mlx4_err(dev, "Invalid SRIOV state\n"); 2842 goto err_sriov; 2843 } 2844 err = mlx4_reset(dev); 2845 if (err) { 2846 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2847 goto err_sriov; 2848 } 2849 goto slave_start; 2850 } 2851 } else { 2852 /* Legacy mode FW requires SRIOV to be enabled before 2853 * doing QUERY_DEV_CAP, since max_eq's value is different if 2854 * SRIOV is enabled. 2855 */ 2856 memset(dev_cap, 0, sizeof(*dev_cap)); 2857 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2858 if (err) { 2859 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2860 goto err_fw; 2861 } 2862 2863 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2864 goto err_fw; 2865 } 2866 } 2867 2868 err = mlx4_init_hca(dev); 2869 if (err) { 2870 if (err == -EACCES) { 2871 /* Not primary Physical function 2872 * Running in slave mode */ 2873 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2874 /* We're not a PF */ 2875 if (dev->flags & MLX4_FLAG_SRIOV) { 2876 if (!existing_vfs) 2877 pci_disable_sriov(pdev); 2878 if (mlx4_is_master(dev) && !reset_flow) 2879 atomic_dec(&pf_loading); 2880 dev->flags &= ~MLX4_FLAG_SRIOV; 2881 } 2882 if (!mlx4_is_slave(dev)) 2883 mlx4_free_ownership(dev); 2884 dev->flags |= MLX4_FLAG_SLAVE; 2885 dev->flags &= ~MLX4_FLAG_MASTER; 2886 goto slave_start; 2887 } else 2888 goto err_fw; 2889 } 2890 2891 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2892 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 2893 existing_vfs, reset_flow); 2894 2895 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 2896 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 2897 dev->flags = dev_flags; 2898 err = mlx4_cmd_init(dev); 2899 if (err) { 2900 /* Only VHCR is cleaned up, so could still 2901 * send FW commands 2902 */ 2903 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 2904 goto err_close; 2905 } 2906 } else { 2907 dev->flags = dev_flags; 2908 } 2909 2910 if (!SRIOV_VALID_STATE(dev->flags)) { 2911 mlx4_err(dev, "Invalid SRIOV state\n"); 2912 goto err_close; 2913 } 2914 } 2915 2916 /* check if the device is functioning at its maximum possible speed. 2917 * No return code for this call, just warn the user in case of PCI 2918 * express device capabilities are under-satisfied by the bus. 2919 */ 2920 if (!mlx4_is_slave(dev)) 2921 mlx4_check_pcie_caps(dev); 2922 2923 /* In master functions, the communication channel must be initialized 2924 * after obtaining its address from fw */ 2925 if (mlx4_is_master(dev)) { 2926 int ib_ports = 0; 2927 2928 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2929 ib_ports++; 2930 2931 if (ib_ports && 2932 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2933 mlx4_err(dev, 2934 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); 2935 err = -EINVAL; 2936 goto err_close; 2937 } 2938 if (dev->caps.num_ports < 2 && 2939 num_vfs_argc > 1) { 2940 err = -EINVAL; 2941 mlx4_err(dev, 2942 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 2943 dev->caps.num_ports); 2944 goto err_close; 2945 } 2946 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 2947 2948 for (i = 0; 2949 i < sizeof(dev->persist->nvfs)/ 2950 sizeof(dev->persist->nvfs[0]); i++) { 2951 unsigned j; 2952 2953 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 2954 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 2955 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 2956 dev->caps.num_ports; 2957 } 2958 } 2959 2960 /* In master functions, the communication channel 2961 * must be initialized after obtaining its address from fw 2962 */ 2963 err = mlx4_multi_func_init(dev); 2964 if (err) { 2965 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 2966 goto err_close; 2967 } 2968 } 2969 2970 err = mlx4_alloc_eq_table(dev); 2971 if (err) 2972 goto err_master_mfunc; 2973 2974 priv->msix_ctl.pool_bm = 0; 2975 mutex_init(&priv->msix_ctl.pool_lock); 2976 2977 mlx4_enable_msi_x(dev); 2978 if ((mlx4_is_mfunc(dev)) && 2979 !(dev->flags & MLX4_FLAG_MSI_X)) { 2980 err = -ENOSYS; 2981 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 2982 goto err_free_eq; 2983 } 2984 2985 if (!mlx4_is_slave(dev)) { 2986 err = mlx4_init_steering(dev); 2987 if (err) 2988 goto err_disable_msix; 2989 } 2990 2991 err = mlx4_setup_hca(dev); 2992 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 2993 !mlx4_is_mfunc(dev)) { 2994 dev->flags &= ~MLX4_FLAG_MSI_X; 2995 dev->caps.num_comp_vectors = 1; 2996 dev->caps.comp_pool = 0; 2997 pci_disable_msix(pdev); 2998 err = mlx4_setup_hca(dev); 2999 } 3000 3001 if (err) 3002 goto err_steer; 3003 3004 mlx4_init_quotas(dev); 3005 /* When PF resources are ready arm its comm channel to enable 3006 * getting commands 3007 */ 3008 if (mlx4_is_master(dev)) { 3009 err = mlx4_ARM_COMM_CHANNEL(dev); 3010 if (err) { 3011 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3012 err); 3013 goto err_steer; 3014 } 3015 } 3016 3017 for (port = 1; port <= dev->caps.num_ports; port++) { 3018 err = mlx4_init_port_info(dev, port); 3019 if (err) 3020 goto err_port; 3021 } 3022 3023 priv->v2p.port1 = 1; 3024 priv->v2p.port2 = 2; 3025 3026 err = mlx4_register_device(dev); 3027 if (err) 3028 goto err_port; 3029 3030 mlx4_request_modules(dev); 3031 3032 mlx4_sense_init(dev); 3033 mlx4_start_sense(dev); 3034 3035 priv->removed = 0; 3036 3037 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3038 atomic_dec(&pf_loading); 3039 3040 kfree(dev_cap); 3041 return 0; 3042 3043 err_port: 3044 for (--port; port >= 1; --port) 3045 mlx4_cleanup_port_info(&priv->port[port]); 3046 3047 mlx4_cleanup_counters_table(dev); 3048 mlx4_cleanup_qp_table(dev); 3049 mlx4_cleanup_srq_table(dev); 3050 mlx4_cleanup_cq_table(dev); 3051 mlx4_cmd_use_polling(dev); 3052 mlx4_cleanup_eq_table(dev); 3053 mlx4_cleanup_mcg_table(dev); 3054 mlx4_cleanup_mr_table(dev); 3055 mlx4_cleanup_xrcd_table(dev); 3056 mlx4_cleanup_pd_table(dev); 3057 mlx4_cleanup_uar_table(dev); 3058 3059 err_steer: 3060 if (!mlx4_is_slave(dev)) 3061 mlx4_clear_steering(dev); 3062 3063 err_disable_msix: 3064 if (dev->flags & MLX4_FLAG_MSI_X) 3065 pci_disable_msix(pdev); 3066 3067 err_free_eq: 3068 mlx4_free_eq_table(dev); 3069 3070 err_master_mfunc: 3071 if (mlx4_is_master(dev)) { 3072 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3073 mlx4_multi_func_cleanup(dev); 3074 } 3075 3076 if (mlx4_is_slave(dev)) { 3077 kfree(dev->caps.qp0_qkey); 3078 kfree(dev->caps.qp0_tunnel); 3079 kfree(dev->caps.qp0_proxy); 3080 kfree(dev->caps.qp1_tunnel); 3081 kfree(dev->caps.qp1_proxy); 3082 } 3083 3084 err_close: 3085 mlx4_close_hca(dev); 3086 3087 err_fw: 3088 mlx4_close_fw(dev); 3089 3090 err_mfunc: 3091 if (mlx4_is_slave(dev)) 3092 mlx4_multi_func_cleanup(dev); 3093 3094 err_cmd: 3095 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3096 3097 err_sriov: 3098 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3099 pci_disable_sriov(pdev); 3100 dev->flags &= ~MLX4_FLAG_SRIOV; 3101 } 3102 3103 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3104 atomic_dec(&pf_loading); 3105 3106 kfree(priv->dev.dev_vfs); 3107 3108 if (!mlx4_is_slave(dev)) 3109 mlx4_free_ownership(dev); 3110 3111 kfree(dev_cap); 3112 return err; 3113 } 3114 3115 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3116 struct mlx4_priv *priv) 3117 { 3118 int err; 3119 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3120 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3121 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3122 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3123 unsigned total_vfs = 0; 3124 unsigned int i; 3125 3126 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3127 3128 err = pci_enable_device(pdev); 3129 if (err) { 3130 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3131 return err; 3132 } 3133 3134 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 3135 * per port, we must limit the number of VFs to 63 (since their are 3136 * 128 MACs) 3137 */ 3138 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3139 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3140 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3141 if (nvfs[i] < 0) { 3142 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3143 err = -EINVAL; 3144 goto err_disable_pdev; 3145 } 3146 } 3147 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3148 i++) { 3149 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3150 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3151 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3152 err = -EINVAL; 3153 goto err_disable_pdev; 3154 } 3155 } 3156 if (total_vfs >= MLX4_MAX_NUM_VF) { 3157 dev_err(&pdev->dev, 3158 "Requested more VF's (%d) than allowed (%d)\n", 3159 total_vfs, MLX4_MAX_NUM_VF - 1); 3160 err = -EINVAL; 3161 goto err_disable_pdev; 3162 } 3163 3164 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3165 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { 3166 dev_err(&pdev->dev, 3167 "Requested more VF's (%d) for port (%d) than allowed (%d)\n", 3168 nvfs[i] + nvfs[2], i + 1, 3169 MLX4_MAX_NUM_VF_P_PORT - 1); 3170 err = -EINVAL; 3171 goto err_disable_pdev; 3172 } 3173 } 3174 3175 /* Check for BARs. */ 3176 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3177 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3178 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3179 pci_dev_data, pci_resource_flags(pdev, 0)); 3180 err = -ENODEV; 3181 goto err_disable_pdev; 3182 } 3183 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3184 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3185 err = -ENODEV; 3186 goto err_disable_pdev; 3187 } 3188 3189 err = pci_request_regions(pdev, DRV_NAME); 3190 if (err) { 3191 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3192 goto err_disable_pdev; 3193 } 3194 3195 pci_set_master(pdev); 3196 3197 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3198 if (err) { 3199 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3200 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3201 if (err) { 3202 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3203 goto err_release_regions; 3204 } 3205 } 3206 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3207 if (err) { 3208 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3209 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3210 if (err) { 3211 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3212 goto err_release_regions; 3213 } 3214 } 3215 3216 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3217 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3218 /* Detect if this device is a virtual function */ 3219 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3220 /* When acting as pf, we normally skip vfs unless explicitly 3221 * requested to probe them. 3222 */ 3223 if (total_vfs) { 3224 unsigned vfs_offset = 0; 3225 3226 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3227 vfs_offset + nvfs[i] < extended_func_num(pdev); 3228 vfs_offset += nvfs[i], i++) 3229 ; 3230 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3231 err = -ENODEV; 3232 goto err_release_regions; 3233 } 3234 if ((extended_func_num(pdev) - vfs_offset) 3235 > prb_vf[i]) { 3236 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3237 extended_func_num(pdev)); 3238 err = -ENODEV; 3239 goto err_release_regions; 3240 } 3241 } 3242 } 3243 3244 err = mlx4_catas_init(&priv->dev); 3245 if (err) 3246 goto err_release_regions; 3247 3248 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3249 if (err) 3250 goto err_catas; 3251 3252 return 0; 3253 3254 err_catas: 3255 mlx4_catas_end(&priv->dev); 3256 3257 err_release_regions: 3258 pci_release_regions(pdev); 3259 3260 err_disable_pdev: 3261 pci_disable_device(pdev); 3262 pci_set_drvdata(pdev, NULL); 3263 return err; 3264 } 3265 3266 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3267 { 3268 struct mlx4_priv *priv; 3269 struct mlx4_dev *dev; 3270 int ret; 3271 3272 printk_once(KERN_INFO "%s", mlx4_version); 3273 3274 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3275 if (!priv) 3276 return -ENOMEM; 3277 3278 dev = &priv->dev; 3279 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3280 if (!dev->persist) { 3281 kfree(priv); 3282 return -ENOMEM; 3283 } 3284 dev->persist->pdev = pdev; 3285 dev->persist->dev = dev; 3286 pci_set_drvdata(pdev, dev->persist); 3287 priv->pci_dev_data = id->driver_data; 3288 mutex_init(&dev->persist->device_state_mutex); 3289 mutex_init(&dev->persist->interface_state_mutex); 3290 3291 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3292 if (ret) { 3293 kfree(dev->persist); 3294 kfree(priv); 3295 } else { 3296 pci_save_state(pdev); 3297 } 3298 3299 return ret; 3300 } 3301 3302 static void mlx4_clean_dev(struct mlx4_dev *dev) 3303 { 3304 struct mlx4_dev_persistent *persist = dev->persist; 3305 struct mlx4_priv *priv = mlx4_priv(dev); 3306 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3307 3308 memset(priv, 0, sizeof(*priv)); 3309 priv->dev.persist = persist; 3310 priv->dev.flags = flags; 3311 } 3312 3313 static void mlx4_unload_one(struct pci_dev *pdev) 3314 { 3315 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3316 struct mlx4_dev *dev = persist->dev; 3317 struct mlx4_priv *priv = mlx4_priv(dev); 3318 int pci_dev_data; 3319 int p, i; 3320 3321 if (priv->removed) 3322 return; 3323 3324 /* saving current ports type for further use */ 3325 for (i = 0; i < dev->caps.num_ports; i++) { 3326 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3327 dev->persist->curr_port_poss_type[i] = dev->caps. 3328 possible_type[i + 1]; 3329 } 3330 3331 pci_dev_data = priv->pci_dev_data; 3332 3333 mlx4_stop_sense(dev); 3334 mlx4_unregister_device(dev); 3335 3336 for (p = 1; p <= dev->caps.num_ports; p++) { 3337 mlx4_cleanup_port_info(&priv->port[p]); 3338 mlx4_CLOSE_PORT(dev, p); 3339 } 3340 3341 if (mlx4_is_master(dev)) 3342 mlx4_free_resource_tracker(dev, 3343 RES_TR_FREE_SLAVES_ONLY); 3344 3345 mlx4_cleanup_counters_table(dev); 3346 mlx4_cleanup_qp_table(dev); 3347 mlx4_cleanup_srq_table(dev); 3348 mlx4_cleanup_cq_table(dev); 3349 mlx4_cmd_use_polling(dev); 3350 mlx4_cleanup_eq_table(dev); 3351 mlx4_cleanup_mcg_table(dev); 3352 mlx4_cleanup_mr_table(dev); 3353 mlx4_cleanup_xrcd_table(dev); 3354 mlx4_cleanup_pd_table(dev); 3355 3356 if (mlx4_is_master(dev)) 3357 mlx4_free_resource_tracker(dev, 3358 RES_TR_FREE_STRUCTS_ONLY); 3359 3360 iounmap(priv->kar); 3361 mlx4_uar_free(dev, &priv->driver_uar); 3362 mlx4_cleanup_uar_table(dev); 3363 if (!mlx4_is_slave(dev)) 3364 mlx4_clear_steering(dev); 3365 mlx4_free_eq_table(dev); 3366 if (mlx4_is_master(dev)) 3367 mlx4_multi_func_cleanup(dev); 3368 mlx4_close_hca(dev); 3369 mlx4_close_fw(dev); 3370 if (mlx4_is_slave(dev)) 3371 mlx4_multi_func_cleanup(dev); 3372 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3373 3374 if (dev->flags & MLX4_FLAG_MSI_X) 3375 pci_disable_msix(pdev); 3376 3377 if (!mlx4_is_slave(dev)) 3378 mlx4_free_ownership(dev); 3379 3380 kfree(dev->caps.qp0_qkey); 3381 kfree(dev->caps.qp0_tunnel); 3382 kfree(dev->caps.qp0_proxy); 3383 kfree(dev->caps.qp1_tunnel); 3384 kfree(dev->caps.qp1_proxy); 3385 kfree(dev->dev_vfs); 3386 3387 mlx4_clean_dev(dev); 3388 priv->pci_dev_data = pci_dev_data; 3389 priv->removed = 1; 3390 } 3391 3392 static void mlx4_remove_one(struct pci_dev *pdev) 3393 { 3394 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3395 struct mlx4_dev *dev = persist->dev; 3396 struct mlx4_priv *priv = mlx4_priv(dev); 3397 int active_vfs = 0; 3398 3399 mutex_lock(&persist->interface_state_mutex); 3400 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3401 mutex_unlock(&persist->interface_state_mutex); 3402 3403 /* Disabling SR-IOV is not allowed while there are active vf's */ 3404 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3405 active_vfs = mlx4_how_many_lives_vf(dev); 3406 if (active_vfs) { 3407 pr_warn("Removing PF when there are active VF's !!\n"); 3408 pr_warn("Will not disable SR-IOV.\n"); 3409 } 3410 } 3411 3412 /* device marked to be under deletion running now without the lock 3413 * letting other tasks to be terminated 3414 */ 3415 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3416 mlx4_unload_one(pdev); 3417 else 3418 mlx4_info(dev, "%s: interface is down\n", __func__); 3419 mlx4_catas_end(dev); 3420 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 3421 mlx4_warn(dev, "Disabling SR-IOV\n"); 3422 pci_disable_sriov(pdev); 3423 } 3424 3425 pci_release_regions(pdev); 3426 pci_disable_device(pdev); 3427 kfree(dev->persist); 3428 kfree(priv); 3429 pci_set_drvdata(pdev, NULL); 3430 } 3431 3432 static int restore_current_port_types(struct mlx4_dev *dev, 3433 enum mlx4_port_type *types, 3434 enum mlx4_port_type *poss_types) 3435 { 3436 struct mlx4_priv *priv = mlx4_priv(dev); 3437 int err, i; 3438 3439 mlx4_stop_sense(dev); 3440 3441 mutex_lock(&priv->port_mutex); 3442 for (i = 0; i < dev->caps.num_ports; i++) 3443 dev->caps.possible_type[i + 1] = poss_types[i]; 3444 err = mlx4_change_port_types(dev, types); 3445 mlx4_start_sense(dev); 3446 mutex_unlock(&priv->port_mutex); 3447 3448 return err; 3449 } 3450 3451 int mlx4_restart_one(struct pci_dev *pdev) 3452 { 3453 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3454 struct mlx4_dev *dev = persist->dev; 3455 struct mlx4_priv *priv = mlx4_priv(dev); 3456 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3457 int pci_dev_data, err, total_vfs; 3458 3459 pci_dev_data = priv->pci_dev_data; 3460 total_vfs = dev->persist->num_vfs; 3461 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3462 3463 mlx4_unload_one(pdev); 3464 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); 3465 if (err) { 3466 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3467 __func__, pci_name(pdev), err); 3468 return err; 3469 } 3470 3471 err = restore_current_port_types(dev, dev->persist->curr_port_type, 3472 dev->persist->curr_port_poss_type); 3473 if (err) 3474 mlx4_err(dev, "could not restore original port types (%d)\n", 3475 err); 3476 3477 return err; 3478 } 3479 3480 static const struct pci_device_id mlx4_pci_table[] = { 3481 /* MT25408 "Hermon" SDR */ 3482 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3483 /* MT25408 "Hermon" DDR */ 3484 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3485 /* MT25408 "Hermon" QDR */ 3486 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3487 /* MT25408 "Hermon" DDR PCIe gen2 */ 3488 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3489 /* MT25408 "Hermon" QDR PCIe gen2 */ 3490 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3491 /* MT25408 "Hermon" EN 10GigE */ 3492 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3493 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 3494 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3495 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 3496 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3497 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 3498 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3499 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 3500 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3501 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 3502 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3503 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 3504 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3505 /* MT25400 Family [ConnectX-2 Virtual Function] */ 3506 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, 3507 /* MT27500 Family [ConnectX-3] */ 3508 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 3509 /* MT27500 Family [ConnectX-3 Virtual Function] */ 3510 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, 3511 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 3512 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 3513 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 3514 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 3515 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 3516 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 3517 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 3518 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 3519 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 3520 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 3521 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 3522 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 3523 { 0, } 3524 }; 3525 3526 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 3527 3528 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3529 pci_channel_state_t state) 3530 { 3531 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3532 3533 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 3534 mlx4_enter_error_state(persist); 3535 3536 mutex_lock(&persist->interface_state_mutex); 3537 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3538 mlx4_unload_one(pdev); 3539 3540 mutex_unlock(&persist->interface_state_mutex); 3541 if (state == pci_channel_io_perm_failure) 3542 return PCI_ERS_RESULT_DISCONNECT; 3543 3544 pci_disable_device(pdev); 3545 return PCI_ERS_RESULT_NEED_RESET; 3546 } 3547 3548 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3549 { 3550 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3551 struct mlx4_dev *dev = persist->dev; 3552 struct mlx4_priv *priv = mlx4_priv(dev); 3553 int ret; 3554 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3555 int total_vfs; 3556 3557 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 3558 ret = pci_enable_device(pdev); 3559 if (ret) { 3560 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 3561 return PCI_ERS_RESULT_DISCONNECT; 3562 } 3563 3564 pci_set_master(pdev); 3565 pci_restore_state(pdev); 3566 pci_save_state(pdev); 3567 3568 total_vfs = dev->persist->num_vfs; 3569 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3570 3571 mutex_lock(&persist->interface_state_mutex); 3572 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 3573 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 3574 priv, 1); 3575 if (ret) { 3576 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 3577 __func__, ret); 3578 goto end; 3579 } 3580 3581 ret = restore_current_port_types(dev, dev->persist-> 3582 curr_port_type, dev->persist-> 3583 curr_port_poss_type); 3584 if (ret) 3585 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 3586 } 3587 end: 3588 mutex_unlock(&persist->interface_state_mutex); 3589 3590 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3591 } 3592 3593 static void mlx4_shutdown(struct pci_dev *pdev) 3594 { 3595 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3596 3597 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 3598 mutex_lock(&persist->interface_state_mutex); 3599 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3600 mlx4_unload_one(pdev); 3601 mutex_unlock(&persist->interface_state_mutex); 3602 } 3603 3604 static const struct pci_error_handlers mlx4_err_handler = { 3605 .error_detected = mlx4_pci_err_detected, 3606 .slot_reset = mlx4_pci_slot_reset, 3607 }; 3608 3609 static struct pci_driver mlx4_driver = { 3610 .name = DRV_NAME, 3611 .id_table = mlx4_pci_table, 3612 .probe = mlx4_init_one, 3613 .shutdown = mlx4_shutdown, 3614 .remove = mlx4_remove_one, 3615 .err_handler = &mlx4_err_handler, 3616 }; 3617 3618 static int __init mlx4_verify_params(void) 3619 { 3620 if ((log_num_mac < 0) || (log_num_mac > 7)) { 3621 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 3622 return -1; 3623 } 3624 3625 if (log_num_vlan != 0) 3626 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 3627 MLX4_LOG_NUM_VLANS); 3628 3629 if (use_prio != 0) 3630 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 3631 3632 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 3633 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 3634 log_mtts_per_seg); 3635 return -1; 3636 } 3637 3638 /* Check if module param for ports type has legal combination */ 3639 if (port_type_array[0] == false && port_type_array[1] == true) { 3640 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 3641 port_type_array[0] = true; 3642 } 3643 3644 if (mlx4_log_num_mgm_entry_size < -7 || 3645 (mlx4_log_num_mgm_entry_size > 0 && 3646 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 3647 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 3648 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 3649 mlx4_log_num_mgm_entry_size, 3650 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 3651 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 3652 return -1; 3653 } 3654 3655 return 0; 3656 } 3657 3658 static int __init mlx4_init(void) 3659 { 3660 int ret; 3661 3662 if (mlx4_verify_params()) 3663 return -EINVAL; 3664 3665 3666 mlx4_wq = create_singlethread_workqueue("mlx4"); 3667 if (!mlx4_wq) 3668 return -ENOMEM; 3669 3670 ret = pci_register_driver(&mlx4_driver); 3671 if (ret < 0) 3672 destroy_workqueue(mlx4_wq); 3673 return ret < 0 ? ret : 0; 3674 } 3675 3676 static void __exit mlx4_cleanup(void) 3677 { 3678 pci_unregister_driver(&mlx4_driver); 3679 destroy_workqueue(mlx4_wq); 3680 } 3681 3682 module_init(mlx4_init); 3683 module_exit(mlx4_cleanup); 3684