1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 #include <linux/kmod.h> 45 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/doorbell.h> 48 49 #include "mlx4.h" 50 #include "fw.h" 51 #include "icm.h" 52 53 MODULE_AUTHOR("Roland Dreier"); 54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 55 MODULE_LICENSE("Dual BSD/GPL"); 56 MODULE_VERSION(DRV_VERSION); 57 58 struct workqueue_struct *mlx4_wq; 59 60 #ifdef CONFIG_MLX4_DEBUG 61 62 int mlx4_debug_level = 0; 63 module_param_named(debug_level, mlx4_debug_level, int, 0644); 64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 65 66 #endif /* CONFIG_MLX4_DEBUG */ 67 68 #ifdef CONFIG_PCI_MSI 69 70 static int msi_x = 1; 71 module_param(msi_x, int, 0444); 72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 73 74 #else /* CONFIG_PCI_MSI */ 75 76 #define msi_x (0) 77 78 #endif /* CONFIG_PCI_MSI */ 79 80 static uint8_t num_vfs[3] = {0, 0, 0}; 81 static int num_vfs_argc; 82 module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 84 "num_vfs=port1,port2,port1+2"); 85 86 static uint8_t probe_vf[3] = {0, 0, 0}; 87 static int probe_vfs_argc; 88 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 89 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 90 "probe_vf=port1,port2,port1+2"); 91 92 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 93 module_param_named(log_num_mgm_entry_size, 94 mlx4_log_num_mgm_entry_size, int, 0444); 95 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 96 " of qp per mcg, for example:" 97 " 10 gives 248.range: 7 <=" 98 " log_num_mgm_entry_size <= 12." 99 " To activate device managed" 100 " flow steering when available, set to -1"); 101 102 static bool enable_64b_cqe_eqe = true; 103 module_param(enable_64b_cqe_eqe, bool, 0444); 104 MODULE_PARM_DESC(enable_64b_cqe_eqe, 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 106 107 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 109 MLX4_FUNC_CAP_DMFS_A0_STATIC) 110 111 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) 112 113 static char mlx4_version[] = 114 DRV_NAME ": Mellanox ConnectX core driver v" 115 DRV_VERSION " (" DRV_RELDATE ")\n"; 116 117 static struct mlx4_profile default_profile = { 118 .num_qp = 1 << 18, 119 .num_srq = 1 << 16, 120 .rdmarc_per_qp = 1 << 4, 121 .num_cq = 1 << 16, 122 .num_mcg = 1 << 13, 123 .num_mpt = 1 << 19, 124 .num_mtt = 1 << 20, /* It is really num mtt segements */ 125 }; 126 127 static struct mlx4_profile low_mem_profile = { 128 .num_qp = 1 << 17, 129 .num_srq = 1 << 6, 130 .rdmarc_per_qp = 1 << 4, 131 .num_cq = 1 << 8, 132 .num_mcg = 1 << 8, 133 .num_mpt = 1 << 9, 134 .num_mtt = 1 << 7, 135 }; 136 137 static int log_num_mac = 7; 138 module_param_named(log_num_mac, log_num_mac, int, 0444); 139 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 140 141 static int log_num_vlan; 142 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 143 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 144 /* Log2 max number of VLANs per ETH port (0-7) */ 145 #define MLX4_LOG_NUM_VLANS 7 146 #define MLX4_MIN_LOG_NUM_VLANS 0 147 #define MLX4_MIN_LOG_NUM_MAC 1 148 149 static bool use_prio; 150 module_param_named(use_prio, use_prio, bool, 0444); 151 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); 152 153 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 154 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 155 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 156 157 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 158 static int arr_argc = 2; 159 module_param_array(port_type_array, int, &arr_argc, 0444); 160 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 161 "1 for IB, 2 for Ethernet"); 162 163 struct mlx4_port_config { 164 struct list_head list; 165 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 166 struct pci_dev *pdev; 167 }; 168 169 static atomic_t pf_loading = ATOMIC_INIT(0); 170 171 int mlx4_check_port_params(struct mlx4_dev *dev, 172 enum mlx4_port_type *port_type) 173 { 174 int i; 175 176 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 177 for (i = 0; i < dev->caps.num_ports - 1; i++) { 178 if (port_type[i] != port_type[i + 1]) { 179 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 180 return -EINVAL; 181 } 182 } 183 } 184 185 for (i = 0; i < dev->caps.num_ports; i++) { 186 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 187 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 188 i + 1); 189 return -EINVAL; 190 } 191 } 192 return 0; 193 } 194 195 static void mlx4_set_port_mask(struct mlx4_dev *dev) 196 { 197 int i; 198 199 for (i = 1; i <= dev->caps.num_ports; ++i) 200 dev->caps.port_mask[i] = dev->caps.port_type[i]; 201 } 202 203 enum { 204 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 205 }; 206 207 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 208 { 209 int err = 0; 210 struct mlx4_func func; 211 212 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 213 err = mlx4_QUERY_FUNC(dev, &func, 0); 214 if (err) { 215 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 216 return err; 217 } 218 dev_cap->max_eqs = func.max_eq; 219 dev_cap->reserved_eqs = func.rsvd_eqs; 220 dev_cap->reserved_uars = func.rsvd_uars; 221 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 222 } 223 return err; 224 } 225 226 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 227 { 228 struct mlx4_caps *dev_cap = &dev->caps; 229 230 /* FW not supporting or cancelled by user */ 231 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || 232 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) 233 return; 234 235 /* Must have 64B CQE_EQE enabled by FW to use bigger stride 236 * When FW has NCSI it may decide not to report 64B CQE/EQEs 237 */ 238 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || 239 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { 240 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 241 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 242 return; 243 } 244 245 if (cache_line_size() == 128 || cache_line_size() == 256) { 246 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); 247 /* Changing the real data inside CQE size to 32B */ 248 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 249 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 250 251 if (mlx4_is_master(dev)) 252 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 253 } else { 254 mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); 255 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 256 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 257 } 258 } 259 260 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, 261 struct mlx4_port_cap *port_cap) 262 { 263 dev->caps.vl_cap[port] = port_cap->max_vl; 264 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; 265 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; 266 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; 267 /* set gid and pkey table operating lengths by default 268 * to non-sriov values 269 */ 270 dev->caps.gid_table_len[port] = port_cap->max_gids; 271 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 272 dev->caps.port_width_cap[port] = port_cap->max_port_width; 273 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 274 dev->caps.def_mac[port] = port_cap->def_mac; 275 dev->caps.supported_type[port] = port_cap->supported_port_types; 276 dev->caps.suggested_type[port] = port_cap->suggested_type; 277 dev->caps.default_sense[port] = port_cap->default_sense; 278 dev->caps.trans_type[port] = port_cap->trans_type; 279 dev->caps.vendor_oui[port] = port_cap->vendor_oui; 280 dev->caps.wavelength[port] = port_cap->wavelength; 281 dev->caps.trans_code[port] = port_cap->trans_code; 282 283 return 0; 284 } 285 286 static int mlx4_dev_port(struct mlx4_dev *dev, int port, 287 struct mlx4_port_cap *port_cap) 288 { 289 int err = 0; 290 291 err = mlx4_QUERY_PORT(dev, port, port_cap); 292 293 if (err) 294 mlx4_err(dev, "QUERY_PORT command failed.\n"); 295 296 return err; 297 } 298 299 #define MLX4_A0_STEERING_TABLE_SIZE 256 300 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 301 { 302 int err; 303 int i; 304 305 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 306 if (err) { 307 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 308 return err; 309 } 310 mlx4_dev_cap_dump(dev, dev_cap); 311 312 if (dev_cap->min_page_sz > PAGE_SIZE) { 313 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 314 dev_cap->min_page_sz, PAGE_SIZE); 315 return -ENODEV; 316 } 317 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 318 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 319 dev_cap->num_ports, MLX4_MAX_PORTS); 320 return -ENODEV; 321 } 322 323 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { 324 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 325 dev_cap->uar_size, 326 (unsigned long long) 327 pci_resource_len(dev->persist->pdev, 2)); 328 return -ENODEV; 329 } 330 331 dev->caps.num_ports = dev_cap->num_ports; 332 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 333 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 334 dev->caps.num_sys_eqs : 335 MLX4_MAX_EQ_NUM; 336 for (i = 1; i <= dev->caps.num_ports; ++i) { 337 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); 338 if (err) { 339 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); 340 return err; 341 } 342 } 343 344 dev->caps.uar_page_size = PAGE_SIZE; 345 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 346 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 347 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 348 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 349 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 350 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 351 dev->caps.max_wqes = dev_cap->max_qp_sz; 352 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 353 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 354 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 355 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 356 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 357 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 358 /* 359 * Subtract 1 from the limit because we need to allocate a 360 * spare CQE so the HCA HW can tell the difference between an 361 * empty CQ and a full CQ. 362 */ 363 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 364 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 365 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 366 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 367 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 368 369 /* The first 128 UARs are used for EQ doorbells */ 370 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 371 dev->caps.reserved_pds = dev_cap->reserved_pds; 372 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 373 dev_cap->reserved_xrcds : 0; 374 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 375 dev_cap->max_xrcds : 0; 376 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 377 378 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 379 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 380 dev->caps.flags = dev_cap->flags; 381 dev->caps.flags2 = dev_cap->flags2; 382 dev->caps.bmme_flags = dev_cap->bmme_flags; 383 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 384 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 385 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 386 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 387 388 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 389 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 390 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 391 /* Don't do sense port on multifunction devices (for now at least) */ 392 if (mlx4_is_mfunc(dev)) 393 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 394 395 if (mlx4_low_memory_profile()) { 396 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 397 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 398 } else { 399 dev->caps.log_num_macs = log_num_mac; 400 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 401 } 402 403 for (i = 1; i <= dev->caps.num_ports; ++i) { 404 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 405 if (dev->caps.supported_type[i]) { 406 /* if only ETH is supported - assign ETH */ 407 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 408 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 409 /* if only IB is supported, assign IB */ 410 else if (dev->caps.supported_type[i] == 411 MLX4_PORT_TYPE_IB) 412 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 413 else { 414 /* if IB and ETH are supported, we set the port 415 * type according to user selection of port type; 416 * if user selected none, take the FW hint */ 417 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 418 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 419 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 420 else 421 dev->caps.port_type[i] = port_type_array[i - 1]; 422 } 423 } 424 /* 425 * Link sensing is allowed on the port if 3 conditions are true: 426 * 1. Both protocols are supported on the port. 427 * 2. Different types are supported on the port 428 * 3. FW declared that it supports link sensing 429 */ 430 mlx4_priv(dev)->sense.sense_allowed[i] = 431 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 432 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 433 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 434 435 /* 436 * If "default_sense" bit is set, we move the port to "AUTO" mode 437 * and perform sense_port FW command to try and set the correct 438 * port type from beginning 439 */ 440 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 441 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 442 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 443 mlx4_SENSE_PORT(dev, i, &sensed_port); 444 if (sensed_port != MLX4_PORT_TYPE_NONE) 445 dev->caps.port_type[i] = sensed_port; 446 } else { 447 dev->caps.possible_type[i] = dev->caps.port_type[i]; 448 } 449 450 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 451 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 452 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 453 i, 1 << dev->caps.log_num_macs); 454 } 455 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 456 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 457 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 458 i, 1 << dev->caps.log_num_vlans); 459 } 460 } 461 462 dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); 463 464 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 465 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 466 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 467 (1 << dev->caps.log_num_macs) * 468 (1 << dev->caps.log_num_vlans) * 469 dev->caps.num_ports; 470 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 471 472 if (dev_cap->dmfs_high_rate_qpn_base > 0 && 473 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) 474 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; 475 else 476 dev->caps.dmfs_high_rate_qpn_base = 477 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 478 479 if (dev_cap->dmfs_high_rate_qpn_range > 0 && 480 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 481 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; 482 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; 483 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; 484 } else { 485 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; 486 dev->caps.dmfs_high_rate_qpn_base = 487 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 488 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; 489 } 490 491 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = 492 dev->caps.dmfs_high_rate_qpn_range; 493 494 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 495 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 496 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 497 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 498 499 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; 500 501 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { 502 if (dev_cap->flags & 503 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 504 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 505 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 506 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 507 } 508 509 if (dev_cap->flags2 & 510 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | 511 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { 512 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); 513 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 514 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 515 } 516 } 517 518 if ((dev->caps.flags & 519 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 520 mlx4_is_master(dev)) 521 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 522 523 if (!mlx4_is_slave(dev)) { 524 mlx4_enable_cqe_eqe_stride(dev); 525 dev->caps.alloc_res_qp_mask = 526 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 527 MLX4_RESERVE_A0_QP; 528 } else { 529 dev->caps.alloc_res_qp_mask = 0; 530 } 531 532 return 0; 533 } 534 535 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, 536 enum pci_bus_speed *speed, 537 enum pcie_link_width *width) 538 { 539 u32 lnkcap1, lnkcap2; 540 int err1, err2; 541 542 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 543 544 *speed = PCI_SPEED_UNKNOWN; 545 *width = PCIE_LNK_WIDTH_UNKNOWN; 546 547 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, 548 &lnkcap1); 549 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, 550 &lnkcap2); 551 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 552 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 553 *speed = PCIE_SPEED_8_0GT; 554 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 555 *speed = PCIE_SPEED_5_0GT; 556 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 557 *speed = PCIE_SPEED_2_5GT; 558 } 559 if (!err1) { 560 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 561 if (!lnkcap2) { /* pre-r3.0 */ 562 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 563 *speed = PCIE_SPEED_5_0GT; 564 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 565 *speed = PCIE_SPEED_2_5GT; 566 } 567 } 568 569 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { 570 return err1 ? err1 : 571 err2 ? err2 : -EINVAL; 572 } 573 return 0; 574 } 575 576 static void mlx4_check_pcie_caps(struct mlx4_dev *dev) 577 { 578 enum pcie_link_width width, width_cap; 579 enum pci_bus_speed speed, speed_cap; 580 int err; 581 582 #define PCIE_SPEED_STR(speed) \ 583 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 584 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 585 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ 586 "Unknown") 587 588 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 589 if (err) { 590 mlx4_warn(dev, 591 "Unable to determine PCIe device BW capabilities\n"); 592 return; 593 } 594 595 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 596 if (err || speed == PCI_SPEED_UNKNOWN || 597 width == PCIE_LNK_WIDTH_UNKNOWN) { 598 mlx4_warn(dev, 599 "Unable to determine PCI device chain minimum BW\n"); 600 return; 601 } 602 603 if (width != width_cap || speed != speed_cap) 604 mlx4_warn(dev, 605 "PCIe BW is different than device's capability\n"); 606 607 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 608 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 609 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 610 width, width_cap); 611 return; 612 } 613 614 /*The function checks if there are live vf, return the num of them*/ 615 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 616 { 617 struct mlx4_priv *priv = mlx4_priv(dev); 618 struct mlx4_slave_state *s_state; 619 int i; 620 int ret = 0; 621 622 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 623 s_state = &priv->mfunc.master.slave_state[i]; 624 if (s_state->active && s_state->last_cmd != 625 MLX4_COMM_CMD_RESET) { 626 mlx4_warn(dev, "%s: slave: %d is still active\n", 627 __func__, i); 628 ret++; 629 } 630 } 631 return ret; 632 } 633 634 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 635 { 636 u32 qk = MLX4_RESERVED_QKEY_BASE; 637 638 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 639 qpn < dev->phys_caps.base_proxy_sqpn) 640 return -EINVAL; 641 642 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 643 /* tunnel qp */ 644 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 645 else 646 qk += qpn - dev->phys_caps.base_proxy_sqpn; 647 *qkey = qk; 648 return 0; 649 } 650 EXPORT_SYMBOL(mlx4_get_parav_qkey); 651 652 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 653 { 654 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 655 656 if (!mlx4_is_master(dev)) 657 return; 658 659 priv->virt2phys_pkey[slave][port - 1][i] = val; 660 } 661 EXPORT_SYMBOL(mlx4_sync_pkey_table); 662 663 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 664 { 665 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 666 667 if (!mlx4_is_master(dev)) 668 return; 669 670 priv->slave_node_guids[slave] = guid; 671 } 672 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 673 674 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 675 { 676 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 677 678 if (!mlx4_is_master(dev)) 679 return 0; 680 681 return priv->slave_node_guids[slave]; 682 } 683 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 684 685 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 686 { 687 struct mlx4_priv *priv = mlx4_priv(dev); 688 struct mlx4_slave_state *s_slave; 689 690 if (!mlx4_is_master(dev)) 691 return 0; 692 693 s_slave = &priv->mfunc.master.slave_state[slave]; 694 return !!s_slave->active; 695 } 696 EXPORT_SYMBOL(mlx4_is_slave_active); 697 698 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 699 struct mlx4_dev_cap *dev_cap, 700 struct mlx4_init_hca_param *hca_param) 701 { 702 dev->caps.steering_mode = hca_param->steering_mode; 703 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 704 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 705 dev->caps.fs_log_max_ucast_qp_range_size = 706 dev_cap->fs_log_max_ucast_qp_range_size; 707 } else 708 dev->caps.num_qp_per_mgm = 709 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 710 711 mlx4_dbg(dev, "Steering mode is: %s\n", 712 mlx4_steering_mode_str(dev->caps.steering_mode)); 713 } 714 715 static int mlx4_slave_cap(struct mlx4_dev *dev) 716 { 717 int err; 718 u32 page_size; 719 struct mlx4_dev_cap dev_cap; 720 struct mlx4_func_cap func_cap; 721 struct mlx4_init_hca_param hca_param; 722 u8 i; 723 724 memset(&hca_param, 0, sizeof(hca_param)); 725 err = mlx4_QUERY_HCA(dev, &hca_param); 726 if (err) { 727 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); 728 return err; 729 } 730 731 /* fail if the hca has an unknown global capability 732 * at this time global_caps should be always zeroed 733 */ 734 if (hca_param.global_caps) { 735 mlx4_err(dev, "Unknown hca global capabilities\n"); 736 return -ENOSYS; 737 } 738 739 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 740 741 dev->caps.hca_core_clock = hca_param.hca_core_clock; 742 743 memset(&dev_cap, 0, sizeof(dev_cap)); 744 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 745 err = mlx4_dev_cap(dev, &dev_cap); 746 if (err) { 747 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 748 return err; 749 } 750 751 err = mlx4_QUERY_FW(dev); 752 if (err) 753 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); 754 755 page_size = ~dev->caps.page_size_cap + 1; 756 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 757 if (page_size > PAGE_SIZE) { 758 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 759 page_size, PAGE_SIZE); 760 return -ENODEV; 761 } 762 763 /* slave gets uar page size from QUERY_HCA fw command */ 764 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 765 766 /* TODO: relax this assumption */ 767 if (dev->caps.uar_page_size != PAGE_SIZE) { 768 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 769 dev->caps.uar_page_size, PAGE_SIZE); 770 return -ENODEV; 771 } 772 773 memset(&func_cap, 0, sizeof(func_cap)); 774 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 775 if (err) { 776 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", 777 err); 778 return err; 779 } 780 781 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 782 PF_CONTEXT_BEHAVIOUR_MASK) { 783 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", 784 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); 785 return -ENOSYS; 786 } 787 788 dev->caps.num_ports = func_cap.num_ports; 789 dev->quotas.qp = func_cap.qp_quota; 790 dev->quotas.srq = func_cap.srq_quota; 791 dev->quotas.cq = func_cap.cq_quota; 792 dev->quotas.mpt = func_cap.mpt_quota; 793 dev->quotas.mtt = func_cap.mtt_quota; 794 dev->caps.num_qps = 1 << hca_param.log_num_qps; 795 dev->caps.num_srqs = 1 << hca_param.log_num_srqs; 796 dev->caps.num_cqs = 1 << hca_param.log_num_cqs; 797 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 798 dev->caps.num_eqs = func_cap.max_eq; 799 dev->caps.reserved_eqs = func_cap.reserved_eq; 800 dev->caps.num_pds = MLX4_NUM_PDS; 801 dev->caps.num_mgms = 0; 802 dev->caps.num_amgms = 0; 803 804 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 805 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 806 dev->caps.num_ports, MLX4_MAX_PORTS); 807 return -ENODEV; 808 } 809 810 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); 811 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 812 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 813 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 814 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 815 816 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 817 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || 818 !dev->caps.qp0_qkey) { 819 err = -ENOMEM; 820 goto err_mem; 821 } 822 823 for (i = 1; i <= dev->caps.num_ports; ++i) { 824 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 825 if (err) { 826 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 827 i, err); 828 goto err_mem; 829 } 830 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 831 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 832 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 833 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 834 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 835 dev->caps.port_mask[i] = dev->caps.port_type[i]; 836 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 837 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 838 &dev->caps.gid_table_len[i], 839 &dev->caps.pkey_table_len[i])) 840 goto err_mem; 841 } 842 843 if (dev->caps.uar_page_size * (dev->caps.num_uars - 844 dev->caps.reserved_uars) > 845 pci_resource_len(dev->persist->pdev, 846 2)) { 847 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 848 dev->caps.uar_page_size * dev->caps.num_uars, 849 (unsigned long long) 850 pci_resource_len(dev->persist->pdev, 2)); 851 goto err_mem; 852 } 853 854 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 855 dev->caps.eqe_size = 64; 856 dev->caps.eqe_factor = 1; 857 } else { 858 dev->caps.eqe_size = 32; 859 dev->caps.eqe_factor = 0; 860 } 861 862 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 863 dev->caps.cqe_size = 64; 864 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 865 } else { 866 dev->caps.cqe_size = 32; 867 } 868 869 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 870 dev->caps.eqe_size = hca_param.eqe_size; 871 dev->caps.eqe_factor = 0; 872 } 873 874 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 875 dev->caps.cqe_size = hca_param.cqe_size; 876 /* User still need to know when CQE > 32B */ 877 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 878 } 879 880 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 881 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 882 883 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 884 885 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 886 dev->caps.bf_reg_size) 887 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 888 889 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 890 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 891 892 return 0; 893 894 err_mem: 895 kfree(dev->caps.qp0_qkey); 896 kfree(dev->caps.qp0_tunnel); 897 kfree(dev->caps.qp0_proxy); 898 kfree(dev->caps.qp1_tunnel); 899 kfree(dev->caps.qp1_proxy); 900 dev->caps.qp0_qkey = NULL; 901 dev->caps.qp0_tunnel = NULL; 902 dev->caps.qp0_proxy = NULL; 903 dev->caps.qp1_tunnel = NULL; 904 dev->caps.qp1_proxy = NULL; 905 906 return err; 907 } 908 909 static void mlx4_request_modules(struct mlx4_dev *dev) 910 { 911 int port; 912 int has_ib_port = false; 913 int has_eth_port = false; 914 #define EN_DRV_NAME "mlx4_en" 915 #define IB_DRV_NAME "mlx4_ib" 916 917 for (port = 1; port <= dev->caps.num_ports; port++) { 918 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 919 has_ib_port = true; 920 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 921 has_eth_port = true; 922 } 923 924 if (has_eth_port) 925 request_module_nowait(EN_DRV_NAME); 926 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 927 request_module_nowait(IB_DRV_NAME); 928 } 929 930 /* 931 * Change the port configuration of the device. 932 * Every user of this function must hold the port mutex. 933 */ 934 int mlx4_change_port_types(struct mlx4_dev *dev, 935 enum mlx4_port_type *port_types) 936 { 937 int err = 0; 938 int change = 0; 939 int port; 940 941 for (port = 0; port < dev->caps.num_ports; port++) { 942 /* Change the port type only if the new type is different 943 * from the current, and not set to Auto */ 944 if (port_types[port] != dev->caps.port_type[port + 1]) 945 change = 1; 946 } 947 if (change) { 948 mlx4_unregister_device(dev); 949 for (port = 1; port <= dev->caps.num_ports; port++) { 950 mlx4_CLOSE_PORT(dev, port); 951 dev->caps.port_type[port] = port_types[port - 1]; 952 err = mlx4_SET_PORT(dev, port, -1); 953 if (err) { 954 mlx4_err(dev, "Failed to set port %d, aborting\n", 955 port); 956 goto out; 957 } 958 } 959 mlx4_set_port_mask(dev); 960 err = mlx4_register_device(dev); 961 if (err) { 962 mlx4_err(dev, "Failed to register device\n"); 963 goto out; 964 } 965 mlx4_request_modules(dev); 966 } 967 968 out: 969 return err; 970 } 971 972 static ssize_t show_port_type(struct device *dev, 973 struct device_attribute *attr, 974 char *buf) 975 { 976 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 977 port_attr); 978 struct mlx4_dev *mdev = info->dev; 979 char type[8]; 980 981 sprintf(type, "%s", 982 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 983 "ib" : "eth"); 984 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 985 sprintf(buf, "auto (%s)\n", type); 986 else 987 sprintf(buf, "%s\n", type); 988 989 return strlen(buf); 990 } 991 992 static ssize_t set_port_type(struct device *dev, 993 struct device_attribute *attr, 994 const char *buf, size_t count) 995 { 996 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 997 port_attr); 998 struct mlx4_dev *mdev = info->dev; 999 struct mlx4_priv *priv = mlx4_priv(mdev); 1000 enum mlx4_port_type types[MLX4_MAX_PORTS]; 1001 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 1002 static DEFINE_MUTEX(set_port_type_mutex); 1003 int i; 1004 int err = 0; 1005 1006 mutex_lock(&set_port_type_mutex); 1007 1008 if (!strcmp(buf, "ib\n")) 1009 info->tmp_type = MLX4_PORT_TYPE_IB; 1010 else if (!strcmp(buf, "eth\n")) 1011 info->tmp_type = MLX4_PORT_TYPE_ETH; 1012 else if (!strcmp(buf, "auto\n")) 1013 info->tmp_type = MLX4_PORT_TYPE_AUTO; 1014 else { 1015 mlx4_err(mdev, "%s is not supported port type\n", buf); 1016 err = -EINVAL; 1017 goto err_out; 1018 } 1019 1020 mlx4_stop_sense(mdev); 1021 mutex_lock(&priv->port_mutex); 1022 /* Possible type is always the one that was delivered */ 1023 mdev->caps.possible_type[info->port] = info->tmp_type; 1024 1025 for (i = 0; i < mdev->caps.num_ports; i++) { 1026 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 1027 mdev->caps.possible_type[i+1]; 1028 if (types[i] == MLX4_PORT_TYPE_AUTO) 1029 types[i] = mdev->caps.port_type[i+1]; 1030 } 1031 1032 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 1033 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 1034 for (i = 1; i <= mdev->caps.num_ports; i++) { 1035 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 1036 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 1037 err = -EINVAL; 1038 } 1039 } 1040 } 1041 if (err) { 1042 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); 1043 goto out; 1044 } 1045 1046 mlx4_do_sense_ports(mdev, new_types, types); 1047 1048 err = mlx4_check_port_params(mdev, new_types); 1049 if (err) 1050 goto out; 1051 1052 /* We are about to apply the changes after the configuration 1053 * was verified, no need to remember the temporary types 1054 * any more */ 1055 for (i = 0; i < mdev->caps.num_ports; i++) 1056 priv->port[i + 1].tmp_type = 0; 1057 1058 err = mlx4_change_port_types(mdev, new_types); 1059 1060 out: 1061 mlx4_start_sense(mdev); 1062 mutex_unlock(&priv->port_mutex); 1063 err_out: 1064 mutex_unlock(&set_port_type_mutex); 1065 1066 return err ? err : count; 1067 } 1068 1069 enum ibta_mtu { 1070 IB_MTU_256 = 1, 1071 IB_MTU_512 = 2, 1072 IB_MTU_1024 = 3, 1073 IB_MTU_2048 = 4, 1074 IB_MTU_4096 = 5 1075 }; 1076 1077 static inline int int_to_ibta_mtu(int mtu) 1078 { 1079 switch (mtu) { 1080 case 256: return IB_MTU_256; 1081 case 512: return IB_MTU_512; 1082 case 1024: return IB_MTU_1024; 1083 case 2048: return IB_MTU_2048; 1084 case 4096: return IB_MTU_4096; 1085 default: return -1; 1086 } 1087 } 1088 1089 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 1090 { 1091 switch (mtu) { 1092 case IB_MTU_256: return 256; 1093 case IB_MTU_512: return 512; 1094 case IB_MTU_1024: return 1024; 1095 case IB_MTU_2048: return 2048; 1096 case IB_MTU_4096: return 4096; 1097 default: return -1; 1098 } 1099 } 1100 1101 static ssize_t show_port_ib_mtu(struct device *dev, 1102 struct device_attribute *attr, 1103 char *buf) 1104 { 1105 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1106 port_mtu_attr); 1107 struct mlx4_dev *mdev = info->dev; 1108 1109 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 1110 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1111 1112 sprintf(buf, "%d\n", 1113 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 1114 return strlen(buf); 1115 } 1116 1117 static ssize_t set_port_ib_mtu(struct device *dev, 1118 struct device_attribute *attr, 1119 const char *buf, size_t count) 1120 { 1121 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1122 port_mtu_attr); 1123 struct mlx4_dev *mdev = info->dev; 1124 struct mlx4_priv *priv = mlx4_priv(mdev); 1125 int err, port, mtu, ibta_mtu = -1; 1126 1127 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 1128 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1129 return -EINVAL; 1130 } 1131 1132 err = kstrtoint(buf, 0, &mtu); 1133 if (!err) 1134 ibta_mtu = int_to_ibta_mtu(mtu); 1135 1136 if (err || ibta_mtu < 0) { 1137 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 1138 return -EINVAL; 1139 } 1140 1141 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 1142 1143 mlx4_stop_sense(mdev); 1144 mutex_lock(&priv->port_mutex); 1145 mlx4_unregister_device(mdev); 1146 for (port = 1; port <= mdev->caps.num_ports; port++) { 1147 mlx4_CLOSE_PORT(mdev, port); 1148 err = mlx4_SET_PORT(mdev, port, -1); 1149 if (err) { 1150 mlx4_err(mdev, "Failed to set port %d, aborting\n", 1151 port); 1152 goto err_set_port; 1153 } 1154 } 1155 err = mlx4_register_device(mdev); 1156 err_set_port: 1157 mutex_unlock(&priv->port_mutex); 1158 mlx4_start_sense(mdev); 1159 return err ? err : count; 1160 } 1161 1162 static int mlx4_load_fw(struct mlx4_dev *dev) 1163 { 1164 struct mlx4_priv *priv = mlx4_priv(dev); 1165 int err; 1166 1167 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 1168 GFP_HIGHUSER | __GFP_NOWARN, 0); 1169 if (!priv->fw.fw_icm) { 1170 mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); 1171 return -ENOMEM; 1172 } 1173 1174 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 1175 if (err) { 1176 mlx4_err(dev, "MAP_FA command failed, aborting\n"); 1177 goto err_free; 1178 } 1179 1180 err = mlx4_RUN_FW(dev); 1181 if (err) { 1182 mlx4_err(dev, "RUN_FW command failed, aborting\n"); 1183 goto err_unmap_fa; 1184 } 1185 1186 return 0; 1187 1188 err_unmap_fa: 1189 mlx4_UNMAP_FA(dev); 1190 1191 err_free: 1192 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1193 return err; 1194 } 1195 1196 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 1197 int cmpt_entry_sz) 1198 { 1199 struct mlx4_priv *priv = mlx4_priv(dev); 1200 int err; 1201 int num_eqs; 1202 1203 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 1204 cmpt_base + 1205 ((u64) (MLX4_CMPT_TYPE_QP * 1206 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1207 cmpt_entry_sz, dev->caps.num_qps, 1208 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1209 0, 0); 1210 if (err) 1211 goto err; 1212 1213 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 1214 cmpt_base + 1215 ((u64) (MLX4_CMPT_TYPE_SRQ * 1216 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1217 cmpt_entry_sz, dev->caps.num_srqs, 1218 dev->caps.reserved_srqs, 0, 0); 1219 if (err) 1220 goto err_qp; 1221 1222 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 1223 cmpt_base + 1224 ((u64) (MLX4_CMPT_TYPE_CQ * 1225 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1226 cmpt_entry_sz, dev->caps.num_cqs, 1227 dev->caps.reserved_cqs, 0, 0); 1228 if (err) 1229 goto err_srq; 1230 1231 num_eqs = dev->phys_caps.num_phys_eqs; 1232 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 1233 cmpt_base + 1234 ((u64) (MLX4_CMPT_TYPE_EQ * 1235 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1236 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 1237 if (err) 1238 goto err_cq; 1239 1240 return 0; 1241 1242 err_cq: 1243 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1244 1245 err_srq: 1246 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1247 1248 err_qp: 1249 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1250 1251 err: 1252 return err; 1253 } 1254 1255 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 1256 struct mlx4_init_hca_param *init_hca, u64 icm_size) 1257 { 1258 struct mlx4_priv *priv = mlx4_priv(dev); 1259 u64 aux_pages; 1260 int num_eqs; 1261 int err; 1262 1263 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1264 if (err) { 1265 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); 1266 return err; 1267 } 1268 1269 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", 1270 (unsigned long long) icm_size >> 10, 1271 (unsigned long long) aux_pages << 2); 1272 1273 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1274 GFP_HIGHUSER | __GFP_NOWARN, 0); 1275 if (!priv->fw.aux_icm) { 1276 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); 1277 return -ENOMEM; 1278 } 1279 1280 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1281 if (err) { 1282 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); 1283 goto err_free_aux; 1284 } 1285 1286 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1287 if (err) { 1288 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); 1289 goto err_unmap_aux; 1290 } 1291 1292 1293 num_eqs = dev->phys_caps.num_phys_eqs; 1294 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 1295 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1296 num_eqs, num_eqs, 0, 0); 1297 if (err) { 1298 mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); 1299 goto err_unmap_cmpt; 1300 } 1301 1302 /* 1303 * Reserved MTT entries must be aligned up to a cacheline 1304 * boundary, since the FW will write to them, while the driver 1305 * writes to all other MTT entries. (The variable 1306 * dev->caps.mtt_entry_sz below is really the MTT segment 1307 * size, not the raw entry size) 1308 */ 1309 dev->caps.reserved_mtts = 1310 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 1311 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 1312 1313 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 1314 init_hca->mtt_base, 1315 dev->caps.mtt_entry_sz, 1316 dev->caps.num_mtts, 1317 dev->caps.reserved_mtts, 1, 0); 1318 if (err) { 1319 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); 1320 goto err_unmap_eq; 1321 } 1322 1323 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 1324 init_hca->dmpt_base, 1325 dev_cap->dmpt_entry_sz, 1326 dev->caps.num_mpts, 1327 dev->caps.reserved_mrws, 1, 1); 1328 if (err) { 1329 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); 1330 goto err_unmap_mtt; 1331 } 1332 1333 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1334 init_hca->qpc_base, 1335 dev_cap->qpc_entry_sz, 1336 dev->caps.num_qps, 1337 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1338 0, 0); 1339 if (err) { 1340 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); 1341 goto err_unmap_dmpt; 1342 } 1343 1344 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1345 init_hca->auxc_base, 1346 dev_cap->aux_entry_sz, 1347 dev->caps.num_qps, 1348 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1349 0, 0); 1350 if (err) { 1351 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); 1352 goto err_unmap_qp; 1353 } 1354 1355 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1356 init_hca->altc_base, 1357 dev_cap->altc_entry_sz, 1358 dev->caps.num_qps, 1359 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1360 0, 0); 1361 if (err) { 1362 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); 1363 goto err_unmap_auxc; 1364 } 1365 1366 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1367 init_hca->rdmarc_base, 1368 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1369 dev->caps.num_qps, 1370 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1371 0, 0); 1372 if (err) { 1373 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1374 goto err_unmap_altc; 1375 } 1376 1377 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1378 init_hca->cqc_base, 1379 dev_cap->cqc_entry_sz, 1380 dev->caps.num_cqs, 1381 dev->caps.reserved_cqs, 0, 0); 1382 if (err) { 1383 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); 1384 goto err_unmap_rdmarc; 1385 } 1386 1387 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1388 init_hca->srqc_base, 1389 dev_cap->srq_entry_sz, 1390 dev->caps.num_srqs, 1391 dev->caps.reserved_srqs, 0, 0); 1392 if (err) { 1393 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); 1394 goto err_unmap_cq; 1395 } 1396 1397 /* 1398 * For flow steering device managed mode it is required to use 1399 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1400 * required, but for simplicity just map the whole multicast 1401 * group table now. The table isn't very big and it's a lot 1402 * easier than trying to track ref counts. 1403 */ 1404 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1405 init_hca->mc_base, 1406 mlx4_get_mgm_entry_size(dev), 1407 dev->caps.num_mgms + dev->caps.num_amgms, 1408 dev->caps.num_mgms + dev->caps.num_amgms, 1409 0, 0); 1410 if (err) { 1411 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); 1412 goto err_unmap_srq; 1413 } 1414 1415 return 0; 1416 1417 err_unmap_srq: 1418 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1419 1420 err_unmap_cq: 1421 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1422 1423 err_unmap_rdmarc: 1424 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1425 1426 err_unmap_altc: 1427 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1428 1429 err_unmap_auxc: 1430 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1431 1432 err_unmap_qp: 1433 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1434 1435 err_unmap_dmpt: 1436 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1437 1438 err_unmap_mtt: 1439 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1440 1441 err_unmap_eq: 1442 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1443 1444 err_unmap_cmpt: 1445 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1446 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1447 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1448 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1449 1450 err_unmap_aux: 1451 mlx4_UNMAP_ICM_AUX(dev); 1452 1453 err_free_aux: 1454 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1455 1456 return err; 1457 } 1458 1459 static void mlx4_free_icms(struct mlx4_dev *dev) 1460 { 1461 struct mlx4_priv *priv = mlx4_priv(dev); 1462 1463 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1464 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1465 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1466 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1467 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1468 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1469 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1470 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1471 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1472 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1473 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1474 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1475 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1476 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1477 1478 mlx4_UNMAP_ICM_AUX(dev); 1479 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1480 } 1481 1482 static void mlx4_slave_exit(struct mlx4_dev *dev) 1483 { 1484 struct mlx4_priv *priv = mlx4_priv(dev); 1485 1486 mutex_lock(&priv->cmd.slave_cmd_mutex); 1487 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 1488 MLX4_COMM_TIME)) 1489 mlx4_warn(dev, "Failed to close slave function\n"); 1490 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1491 } 1492 1493 static int map_bf_area(struct mlx4_dev *dev) 1494 { 1495 struct mlx4_priv *priv = mlx4_priv(dev); 1496 resource_size_t bf_start; 1497 resource_size_t bf_len; 1498 int err = 0; 1499 1500 if (!dev->caps.bf_reg_size) 1501 return -ENXIO; 1502 1503 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1504 (dev->caps.num_uars << PAGE_SHIFT); 1505 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1506 (dev->caps.num_uars << PAGE_SHIFT); 1507 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1508 if (!priv->bf_mapping) 1509 err = -ENOMEM; 1510 1511 return err; 1512 } 1513 1514 static void unmap_bf_area(struct mlx4_dev *dev) 1515 { 1516 if (mlx4_priv(dev)->bf_mapping) 1517 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1518 } 1519 1520 cycle_t mlx4_read_clock(struct mlx4_dev *dev) 1521 { 1522 u32 clockhi, clocklo, clockhi1; 1523 cycle_t cycles; 1524 int i; 1525 struct mlx4_priv *priv = mlx4_priv(dev); 1526 1527 for (i = 0; i < 10; i++) { 1528 clockhi = swab32(readl(priv->clock_mapping)); 1529 clocklo = swab32(readl(priv->clock_mapping + 4)); 1530 clockhi1 = swab32(readl(priv->clock_mapping)); 1531 if (clockhi == clockhi1) 1532 break; 1533 } 1534 1535 cycles = (u64) clockhi << 32 | (u64) clocklo; 1536 1537 return cycles; 1538 } 1539 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1540 1541 1542 static int map_internal_clock(struct mlx4_dev *dev) 1543 { 1544 struct mlx4_priv *priv = mlx4_priv(dev); 1545 1546 priv->clock_mapping = 1547 ioremap(pci_resource_start(dev->persist->pdev, 1548 priv->fw.clock_bar) + 1549 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1550 1551 if (!priv->clock_mapping) 1552 return -ENOMEM; 1553 1554 return 0; 1555 } 1556 1557 static void unmap_internal_clock(struct mlx4_dev *dev) 1558 { 1559 struct mlx4_priv *priv = mlx4_priv(dev); 1560 1561 if (priv->clock_mapping) 1562 iounmap(priv->clock_mapping); 1563 } 1564 1565 static void mlx4_close_hca(struct mlx4_dev *dev) 1566 { 1567 unmap_internal_clock(dev); 1568 unmap_bf_area(dev); 1569 if (mlx4_is_slave(dev)) 1570 mlx4_slave_exit(dev); 1571 else { 1572 mlx4_CLOSE_HCA(dev, 0); 1573 mlx4_free_icms(dev); 1574 } 1575 } 1576 1577 static void mlx4_close_fw(struct mlx4_dev *dev) 1578 { 1579 if (!mlx4_is_slave(dev)) { 1580 mlx4_UNMAP_FA(dev); 1581 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1582 } 1583 } 1584 1585 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1586 { 1587 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1588 1589 u32 comm_flags; 1590 u32 offline_bit; 1591 unsigned long end; 1592 struct mlx4_priv *priv = mlx4_priv(dev); 1593 1594 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1595 while (time_before(jiffies, end)) { 1596 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1597 MLX4_COMM_CHAN_FLAGS)); 1598 offline_bit = (comm_flags & 1599 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1600 if (!offline_bit) 1601 return 0; 1602 /* There are cases as part of AER/Reset flow that PF needs 1603 * around 100 msec to load. We therefore sleep for 100 msec 1604 * to allow other tasks to make use of that CPU during this 1605 * time interval. 1606 */ 1607 msleep(100); 1608 } 1609 mlx4_err(dev, "Communication channel is offline.\n"); 1610 return -EIO; 1611 } 1612 1613 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1614 { 1615 #define COMM_CHAN_RST_OFFSET 0x1e 1616 1617 struct mlx4_priv *priv = mlx4_priv(dev); 1618 u32 comm_rst; 1619 u32 comm_caps; 1620 1621 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1622 MLX4_COMM_CHAN_CAPS)); 1623 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1624 1625 if (comm_rst) 1626 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1627 } 1628 1629 static int mlx4_init_slave(struct mlx4_dev *dev) 1630 { 1631 struct mlx4_priv *priv = mlx4_priv(dev); 1632 u64 dma = (u64) priv->mfunc.vhcr_dma; 1633 int ret_from_reset = 0; 1634 u32 slave_read; 1635 u32 cmd_channel_ver; 1636 1637 if (atomic_read(&pf_loading)) { 1638 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1639 return -EPROBE_DEFER; 1640 } 1641 1642 mutex_lock(&priv->cmd.slave_cmd_mutex); 1643 priv->cmd.max_cmds = 1; 1644 if (mlx4_comm_check_offline(dev)) { 1645 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1646 goto err_offline; 1647 } 1648 1649 mlx4_reset_vf_support(dev); 1650 mlx4_warn(dev, "Sending reset\n"); 1651 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1652 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1653 /* if we are in the middle of flr the slave will try 1654 * NUM_OF_RESET_RETRIES times before leaving.*/ 1655 if (ret_from_reset) { 1656 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1657 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1658 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1659 return -EPROBE_DEFER; 1660 } else 1661 goto err; 1662 } 1663 1664 /* check the driver version - the slave I/F revision 1665 * must match the master's */ 1666 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1667 cmd_channel_ver = mlx4_comm_get_version(); 1668 1669 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1670 MLX4_COMM_GET_IF_REV(slave_read)) { 1671 mlx4_err(dev, "slave driver version is not supported by the master\n"); 1672 goto err; 1673 } 1674 1675 mlx4_warn(dev, "Sending vhcr0\n"); 1676 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1677 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1678 goto err; 1679 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1680 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1681 goto err; 1682 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1683 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1684 goto err; 1685 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 1686 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1687 goto err; 1688 1689 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1690 return 0; 1691 1692 err: 1693 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 1694 err_offline: 1695 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1696 return -EIO; 1697 } 1698 1699 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 1700 { 1701 int i; 1702 1703 for (i = 1; i <= dev->caps.num_ports; i++) { 1704 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 1705 dev->caps.gid_table_len[i] = 1706 mlx4_get_slave_num_gids(dev, 0, i); 1707 else 1708 dev->caps.gid_table_len[i] = 1; 1709 dev->caps.pkey_table_len[i] = 1710 dev->phys_caps.pkey_phys_table_len[i] - 1; 1711 } 1712 } 1713 1714 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 1715 { 1716 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 1717 1718 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 1719 i++) { 1720 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 1721 break; 1722 } 1723 1724 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 1725 } 1726 1727 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 1728 { 1729 switch (dmfs_high_steer_mode) { 1730 case MLX4_STEERING_DMFS_A0_DEFAULT: 1731 return "default performance"; 1732 1733 case MLX4_STEERING_DMFS_A0_DYNAMIC: 1734 return "dynamic hybrid mode"; 1735 1736 case MLX4_STEERING_DMFS_A0_STATIC: 1737 return "performance optimized for limited rule configuration (static)"; 1738 1739 case MLX4_STEERING_DMFS_A0_DISABLE: 1740 return "disabled performance optimized steering"; 1741 1742 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 1743 return "performance optimized steering not supported"; 1744 1745 default: 1746 return "Unrecognized mode"; 1747 } 1748 } 1749 1750 #define MLX4_DMFS_A0_STEERING (1UL << 2) 1751 1752 static void choose_steering_mode(struct mlx4_dev *dev, 1753 struct mlx4_dev_cap *dev_cap) 1754 { 1755 if (mlx4_log_num_mgm_entry_size <= 0) { 1756 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 1757 if (dev->caps.dmfs_high_steer_mode == 1758 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1759 mlx4_err(dev, "DMFS high rate mode not supported\n"); 1760 else 1761 dev->caps.dmfs_high_steer_mode = 1762 MLX4_STEERING_DMFS_A0_STATIC; 1763 } 1764 } 1765 1766 if (mlx4_log_num_mgm_entry_size <= 0 && 1767 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1768 (!mlx4_is_mfunc(dev) || 1769 (dev_cap->fs_max_num_qp_per_entry >= 1770 (dev->persist->num_vfs + 1))) && 1771 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1772 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1773 dev->oper_log_mgm_entry_size = 1774 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 1775 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 1776 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 1777 dev->caps.fs_log_max_ucast_qp_range_size = 1778 dev_cap->fs_log_max_ucast_qp_range_size; 1779 } else { 1780 if (dev->caps.dmfs_high_steer_mode != 1781 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1782 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 1783 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 1784 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1785 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 1786 else { 1787 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 1788 1789 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1790 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1791 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 1792 } 1793 dev->oper_log_mgm_entry_size = 1794 mlx4_log_num_mgm_entry_size > 0 ? 1795 mlx4_log_num_mgm_entry_size : 1796 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1797 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1798 } 1799 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 1800 mlx4_steering_mode_str(dev->caps.steering_mode), 1801 dev->oper_log_mgm_entry_size, 1802 mlx4_log_num_mgm_entry_size); 1803 } 1804 1805 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 1806 struct mlx4_dev_cap *dev_cap) 1807 { 1808 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 1809 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 1810 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 1811 else 1812 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 1813 1814 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 1815 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 1816 } 1817 1818 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 1819 { 1820 int i; 1821 struct mlx4_port_cap port_cap; 1822 1823 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1824 return -EINVAL; 1825 1826 for (i = 1; i <= dev->caps.num_ports; i++) { 1827 if (mlx4_dev_port(dev, i, &port_cap)) { 1828 mlx4_err(dev, 1829 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 1830 } else if ((dev->caps.dmfs_high_steer_mode != 1831 MLX4_STEERING_DMFS_A0_DEFAULT) && 1832 (port_cap.dmfs_optimized_state == 1833 !!(dev->caps.dmfs_high_steer_mode == 1834 MLX4_STEERING_DMFS_A0_DISABLE))) { 1835 mlx4_err(dev, 1836 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 1837 dmfs_high_rate_steering_mode_str( 1838 dev->caps.dmfs_high_steer_mode), 1839 (port_cap.dmfs_optimized_state ? 1840 "enabled" : "disabled")); 1841 } 1842 } 1843 1844 return 0; 1845 } 1846 1847 static int mlx4_init_fw(struct mlx4_dev *dev) 1848 { 1849 struct mlx4_mod_stat_cfg mlx4_cfg; 1850 int err = 0; 1851 1852 if (!mlx4_is_slave(dev)) { 1853 err = mlx4_QUERY_FW(dev); 1854 if (err) { 1855 if (err == -EACCES) 1856 mlx4_info(dev, "non-primary physical function, skipping\n"); 1857 else 1858 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 1859 return err; 1860 } 1861 1862 err = mlx4_load_fw(dev); 1863 if (err) { 1864 mlx4_err(dev, "Failed to start FW, aborting\n"); 1865 return err; 1866 } 1867 1868 mlx4_cfg.log_pg_sz_m = 1; 1869 mlx4_cfg.log_pg_sz = 0; 1870 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1871 if (err) 1872 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1873 } 1874 1875 return err; 1876 } 1877 1878 static int mlx4_init_hca(struct mlx4_dev *dev) 1879 { 1880 struct mlx4_priv *priv = mlx4_priv(dev); 1881 struct mlx4_adapter adapter; 1882 struct mlx4_dev_cap dev_cap; 1883 struct mlx4_profile profile; 1884 struct mlx4_init_hca_param init_hca; 1885 u64 icm_size; 1886 struct mlx4_config_dev_params params; 1887 int err; 1888 1889 if (!mlx4_is_slave(dev)) { 1890 err = mlx4_dev_cap(dev, &dev_cap); 1891 if (err) { 1892 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 1893 return err; 1894 } 1895 1896 choose_steering_mode(dev, &dev_cap); 1897 choose_tunnel_offload_mode(dev, &dev_cap); 1898 1899 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 1900 mlx4_is_master(dev)) 1901 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 1902 1903 err = mlx4_get_phys_port_id(dev); 1904 if (err) 1905 mlx4_err(dev, "Fail to get physical port id\n"); 1906 1907 if (mlx4_is_master(dev)) 1908 mlx4_parav_master_pf_caps(dev); 1909 1910 if (mlx4_low_memory_profile()) { 1911 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n"); 1912 profile = low_mem_profile; 1913 } else { 1914 profile = default_profile; 1915 } 1916 if (dev->caps.steering_mode == 1917 MLX4_STEERING_MODE_DEVICE_MANAGED) 1918 profile.num_mcg = MLX4_FS_NUM_MCG; 1919 1920 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 1921 &init_hca); 1922 if ((long long) icm_size < 0) { 1923 err = icm_size; 1924 return err; 1925 } 1926 1927 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 1928 1929 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 1930 init_hca.uar_page_sz = PAGE_SHIFT - 12; 1931 init_hca.mw_enabled = 0; 1932 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 1933 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 1934 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 1935 1936 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1937 if (err) 1938 return err; 1939 1940 err = mlx4_INIT_HCA(dev, &init_hca); 1941 if (err) { 1942 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 1943 goto err_free_icm; 1944 } 1945 1946 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 1947 err = mlx4_query_func(dev, &dev_cap); 1948 if (err < 0) { 1949 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 1950 goto err_close; 1951 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 1952 dev->caps.num_eqs = dev_cap.max_eqs; 1953 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 1954 dev->caps.reserved_uars = dev_cap.reserved_uars; 1955 } 1956 } 1957 1958 /* 1959 * If TS is supported by FW 1960 * read HCA frequency by QUERY_HCA command 1961 */ 1962 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 1963 memset(&init_hca, 0, sizeof(init_hca)); 1964 err = mlx4_QUERY_HCA(dev, &init_hca); 1965 if (err) { 1966 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 1967 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1968 } else { 1969 dev->caps.hca_core_clock = 1970 init_hca.hca_core_clock; 1971 } 1972 1973 /* In case we got HCA frequency 0 - disable timestamping 1974 * to avoid dividing by zero 1975 */ 1976 if (!dev->caps.hca_core_clock) { 1977 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1978 mlx4_err(dev, 1979 "HCA frequency is 0 - timestamping is not supported\n"); 1980 } else if (map_internal_clock(dev)) { 1981 /* 1982 * Map internal clock, 1983 * in case of failure disable timestamping 1984 */ 1985 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1986 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); 1987 } 1988 } 1989 1990 if (dev->caps.dmfs_high_steer_mode != 1991 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 1992 if (mlx4_validate_optimized_steering(dev)) 1993 mlx4_warn(dev, "Optimized steering validation failed\n"); 1994 1995 if (dev->caps.dmfs_high_steer_mode == 1996 MLX4_STEERING_DMFS_A0_DISABLE) { 1997 dev->caps.dmfs_high_rate_qpn_base = 1998 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 1999 dev->caps.dmfs_high_rate_qpn_range = 2000 MLX4_A0_STEERING_TABLE_SIZE; 2001 } 2002 2003 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2004 dmfs_high_rate_steering_mode_str( 2005 dev->caps.dmfs_high_steer_mode)); 2006 } 2007 } else { 2008 err = mlx4_init_slave(dev); 2009 if (err) { 2010 if (err != -EPROBE_DEFER) 2011 mlx4_err(dev, "Failed to initialize slave\n"); 2012 return err; 2013 } 2014 2015 err = mlx4_slave_cap(dev); 2016 if (err) { 2017 mlx4_err(dev, "Failed to obtain slave caps\n"); 2018 goto err_close; 2019 } 2020 } 2021 2022 if (map_bf_area(dev)) 2023 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2024 2025 /*Only the master set the ports, all the rest got it from it.*/ 2026 if (!mlx4_is_slave(dev)) 2027 mlx4_set_port_mask(dev); 2028 2029 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2030 if (err) { 2031 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2032 goto unmap_bf; 2033 } 2034 2035 /* Query CONFIG_DEV parameters */ 2036 err = mlx4_config_dev_retrieval(dev, ¶ms); 2037 if (err && err != -ENOTSUPP) { 2038 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2039 } else if (!err) { 2040 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2041 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2042 } 2043 priv->eq_table.inta_pin = adapter.inta_pin; 2044 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2045 2046 return 0; 2047 2048 unmap_bf: 2049 unmap_internal_clock(dev); 2050 unmap_bf_area(dev); 2051 2052 if (mlx4_is_slave(dev)) { 2053 kfree(dev->caps.qp0_qkey); 2054 kfree(dev->caps.qp0_tunnel); 2055 kfree(dev->caps.qp0_proxy); 2056 kfree(dev->caps.qp1_tunnel); 2057 kfree(dev->caps.qp1_proxy); 2058 } 2059 2060 err_close: 2061 if (mlx4_is_slave(dev)) 2062 mlx4_slave_exit(dev); 2063 else 2064 mlx4_CLOSE_HCA(dev, 0); 2065 2066 err_free_icm: 2067 if (!mlx4_is_slave(dev)) 2068 mlx4_free_icms(dev); 2069 2070 return err; 2071 } 2072 2073 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2074 { 2075 struct mlx4_priv *priv = mlx4_priv(dev); 2076 int nent; 2077 2078 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2079 return -ENOENT; 2080 2081 nent = dev->caps.max_counters; 2082 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 2083 } 2084 2085 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2086 { 2087 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2088 } 2089 2090 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2091 { 2092 struct mlx4_priv *priv = mlx4_priv(dev); 2093 2094 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2095 return -ENOENT; 2096 2097 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2098 if (*idx == -1) 2099 return -ENOMEM; 2100 2101 return 0; 2102 } 2103 2104 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2105 { 2106 u64 out_param; 2107 int err; 2108 2109 if (mlx4_is_mfunc(dev)) { 2110 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2111 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2112 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2113 if (!err) 2114 *idx = get_param_l(&out_param); 2115 2116 return err; 2117 } 2118 return __mlx4_counter_alloc(dev, idx); 2119 } 2120 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2121 2122 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2123 { 2124 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2125 return; 2126 } 2127 2128 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2129 { 2130 u64 in_param = 0; 2131 2132 if (mlx4_is_mfunc(dev)) { 2133 set_param_l(&in_param, idx); 2134 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2135 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2136 MLX4_CMD_WRAPPED); 2137 return; 2138 } 2139 __mlx4_counter_free(dev, idx); 2140 } 2141 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2142 2143 static int mlx4_setup_hca(struct mlx4_dev *dev) 2144 { 2145 struct mlx4_priv *priv = mlx4_priv(dev); 2146 int err; 2147 int port; 2148 __be32 ib_port_default_caps; 2149 2150 err = mlx4_init_uar_table(dev); 2151 if (err) { 2152 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2153 return err; 2154 } 2155 2156 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2157 if (err) { 2158 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2159 goto err_uar_table_free; 2160 } 2161 2162 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2163 if (!priv->kar) { 2164 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2165 err = -ENOMEM; 2166 goto err_uar_free; 2167 } 2168 2169 err = mlx4_init_pd_table(dev); 2170 if (err) { 2171 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2172 goto err_kar_unmap; 2173 } 2174 2175 err = mlx4_init_xrcd_table(dev); 2176 if (err) { 2177 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2178 goto err_pd_table_free; 2179 } 2180 2181 err = mlx4_init_mr_table(dev); 2182 if (err) { 2183 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2184 goto err_xrcd_table_free; 2185 } 2186 2187 if (!mlx4_is_slave(dev)) { 2188 err = mlx4_init_mcg_table(dev); 2189 if (err) { 2190 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2191 goto err_mr_table_free; 2192 } 2193 err = mlx4_config_mad_demux(dev); 2194 if (err) { 2195 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2196 goto err_mcg_table_free; 2197 } 2198 } 2199 2200 err = mlx4_init_eq_table(dev); 2201 if (err) { 2202 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2203 goto err_mcg_table_free; 2204 } 2205 2206 err = mlx4_cmd_use_events(dev); 2207 if (err) { 2208 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2209 goto err_eq_table_free; 2210 } 2211 2212 err = mlx4_NOP(dev); 2213 if (err) { 2214 if (dev->flags & MLX4_FLAG_MSI_X) { 2215 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2216 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2217 mlx4_warn(dev, "Trying again without MSI-X\n"); 2218 } else { 2219 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2220 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2221 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2222 } 2223 2224 goto err_cmd_poll; 2225 } 2226 2227 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2228 2229 err = mlx4_init_cq_table(dev); 2230 if (err) { 2231 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2232 goto err_cmd_poll; 2233 } 2234 2235 err = mlx4_init_srq_table(dev); 2236 if (err) { 2237 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2238 goto err_cq_table_free; 2239 } 2240 2241 err = mlx4_init_qp_table(dev); 2242 if (err) { 2243 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2244 goto err_srq_table_free; 2245 } 2246 2247 err = mlx4_init_counters_table(dev); 2248 if (err && err != -ENOENT) { 2249 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2250 goto err_qp_table_free; 2251 } 2252 2253 if (!mlx4_is_slave(dev)) { 2254 for (port = 1; port <= dev->caps.num_ports; port++) { 2255 ib_port_default_caps = 0; 2256 err = mlx4_get_port_ib_caps(dev, port, 2257 &ib_port_default_caps); 2258 if (err) 2259 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2260 port, err); 2261 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2262 2263 /* initialize per-slave default ib port capabilities */ 2264 if (mlx4_is_master(dev)) { 2265 int i; 2266 for (i = 0; i < dev->num_slaves; i++) { 2267 if (i == mlx4_master_func_num(dev)) 2268 continue; 2269 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2270 ib_port_default_caps; 2271 } 2272 } 2273 2274 if (mlx4_is_mfunc(dev)) 2275 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2276 else 2277 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2278 2279 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2280 dev->caps.pkey_table_len[port] : -1); 2281 if (err) { 2282 mlx4_err(dev, "Failed to set port %d, aborting\n", 2283 port); 2284 goto err_counters_table_free; 2285 } 2286 } 2287 } 2288 2289 return 0; 2290 2291 err_counters_table_free: 2292 mlx4_cleanup_counters_table(dev); 2293 2294 err_qp_table_free: 2295 mlx4_cleanup_qp_table(dev); 2296 2297 err_srq_table_free: 2298 mlx4_cleanup_srq_table(dev); 2299 2300 err_cq_table_free: 2301 mlx4_cleanup_cq_table(dev); 2302 2303 err_cmd_poll: 2304 mlx4_cmd_use_polling(dev); 2305 2306 err_eq_table_free: 2307 mlx4_cleanup_eq_table(dev); 2308 2309 err_mcg_table_free: 2310 if (!mlx4_is_slave(dev)) 2311 mlx4_cleanup_mcg_table(dev); 2312 2313 err_mr_table_free: 2314 mlx4_cleanup_mr_table(dev); 2315 2316 err_xrcd_table_free: 2317 mlx4_cleanup_xrcd_table(dev); 2318 2319 err_pd_table_free: 2320 mlx4_cleanup_pd_table(dev); 2321 2322 err_kar_unmap: 2323 iounmap(priv->kar); 2324 2325 err_uar_free: 2326 mlx4_uar_free(dev, &priv->driver_uar); 2327 2328 err_uar_table_free: 2329 mlx4_cleanup_uar_table(dev); 2330 return err; 2331 } 2332 2333 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2334 { 2335 struct mlx4_priv *priv = mlx4_priv(dev); 2336 struct msix_entry *entries; 2337 int i; 2338 2339 if (msi_x) { 2340 int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; 2341 2342 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2343 nreq); 2344 2345 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2346 if (!entries) 2347 goto no_msi; 2348 2349 for (i = 0; i < nreq; ++i) 2350 entries[i].entry = i; 2351 2352 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2353 nreq); 2354 2355 if (nreq < 0) { 2356 kfree(entries); 2357 goto no_msi; 2358 } else if (nreq < MSIX_LEGACY_SZ + 2359 dev->caps.num_ports * MIN_MSIX_P_PORT) { 2360 /*Working in legacy mode , all EQ's shared*/ 2361 dev->caps.comp_pool = 0; 2362 dev->caps.num_comp_vectors = nreq - 1; 2363 } else { 2364 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 2365 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 2366 } 2367 for (i = 0; i < nreq; ++i) 2368 priv->eq_table.eq[i].irq = entries[i].vector; 2369 2370 dev->flags |= MLX4_FLAG_MSI_X; 2371 2372 kfree(entries); 2373 return; 2374 } 2375 2376 no_msi: 2377 dev->caps.num_comp_vectors = 1; 2378 dev->caps.comp_pool = 0; 2379 2380 for (i = 0; i < 2; ++i) 2381 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2382 } 2383 2384 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2385 { 2386 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2387 int err = 0; 2388 2389 info->dev = dev; 2390 info->port = port; 2391 if (!mlx4_is_slave(dev)) { 2392 mlx4_init_mac_table(dev, &info->mac_table); 2393 mlx4_init_vlan_table(dev, &info->vlan_table); 2394 mlx4_init_roce_gid_table(dev, &info->gid_table); 2395 info->base_qpn = mlx4_get_base_qpn(dev, port); 2396 } 2397 2398 sprintf(info->dev_name, "mlx4_port%d", port); 2399 info->port_attr.attr.name = info->dev_name; 2400 if (mlx4_is_mfunc(dev)) 2401 info->port_attr.attr.mode = S_IRUGO; 2402 else { 2403 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2404 info->port_attr.store = set_port_type; 2405 } 2406 info->port_attr.show = show_port_type; 2407 sysfs_attr_init(&info->port_attr.attr); 2408 2409 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2410 if (err) { 2411 mlx4_err(dev, "Failed to create file for port %d\n", port); 2412 info->port = -1; 2413 } 2414 2415 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2416 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2417 if (mlx4_is_mfunc(dev)) 2418 info->port_mtu_attr.attr.mode = S_IRUGO; 2419 else { 2420 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2421 info->port_mtu_attr.store = set_port_ib_mtu; 2422 } 2423 info->port_mtu_attr.show = show_port_ib_mtu; 2424 sysfs_attr_init(&info->port_mtu_attr.attr); 2425 2426 err = device_create_file(&dev->persist->pdev->dev, 2427 &info->port_mtu_attr); 2428 if (err) { 2429 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2430 device_remove_file(&info->dev->persist->pdev->dev, 2431 &info->port_attr); 2432 info->port = -1; 2433 } 2434 2435 return err; 2436 } 2437 2438 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2439 { 2440 if (info->port < 0) 2441 return; 2442 2443 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2444 device_remove_file(&info->dev->persist->pdev->dev, 2445 &info->port_mtu_attr); 2446 } 2447 2448 static int mlx4_init_steering(struct mlx4_dev *dev) 2449 { 2450 struct mlx4_priv *priv = mlx4_priv(dev); 2451 int num_entries = dev->caps.num_ports; 2452 int i, j; 2453 2454 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2455 if (!priv->steer) 2456 return -ENOMEM; 2457 2458 for (i = 0; i < num_entries; i++) 2459 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2460 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2461 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2462 } 2463 return 0; 2464 } 2465 2466 static void mlx4_clear_steering(struct mlx4_dev *dev) 2467 { 2468 struct mlx4_priv *priv = mlx4_priv(dev); 2469 struct mlx4_steer_index *entry, *tmp_entry; 2470 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2471 int num_entries = dev->caps.num_ports; 2472 int i, j; 2473 2474 for (i = 0; i < num_entries; i++) { 2475 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2476 list_for_each_entry_safe(pqp, tmp_pqp, 2477 &priv->steer[i].promisc_qps[j], 2478 list) { 2479 list_del(&pqp->list); 2480 kfree(pqp); 2481 } 2482 list_for_each_entry_safe(entry, tmp_entry, 2483 &priv->steer[i].steer_entries[j], 2484 list) { 2485 list_del(&entry->list); 2486 list_for_each_entry_safe(pqp, tmp_pqp, 2487 &entry->duplicates, 2488 list) { 2489 list_del(&pqp->list); 2490 kfree(pqp); 2491 } 2492 kfree(entry); 2493 } 2494 } 2495 } 2496 kfree(priv->steer); 2497 } 2498 2499 static int extended_func_num(struct pci_dev *pdev) 2500 { 2501 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 2502 } 2503 2504 #define MLX4_OWNER_BASE 0x8069c 2505 #define MLX4_OWNER_SIZE 4 2506 2507 static int mlx4_get_ownership(struct mlx4_dev *dev) 2508 { 2509 void __iomem *owner; 2510 u32 ret; 2511 2512 if (pci_channel_offline(dev->persist->pdev)) 2513 return -EIO; 2514 2515 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2516 MLX4_OWNER_BASE, 2517 MLX4_OWNER_SIZE); 2518 if (!owner) { 2519 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2520 return -ENOMEM; 2521 } 2522 2523 ret = readl(owner); 2524 iounmap(owner); 2525 return (int) !!ret; 2526 } 2527 2528 static void mlx4_free_ownership(struct mlx4_dev *dev) 2529 { 2530 void __iomem *owner; 2531 2532 if (pci_channel_offline(dev->persist->pdev)) 2533 return; 2534 2535 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2536 MLX4_OWNER_BASE, 2537 MLX4_OWNER_SIZE); 2538 if (!owner) { 2539 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2540 return; 2541 } 2542 writel(0, owner); 2543 msleep(1000); 2544 iounmap(owner); 2545 } 2546 2547 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 2548 !!((flags) & MLX4_FLAG_MASTER)) 2549 2550 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2551 u8 total_vfs, int existing_vfs, int reset_flow) 2552 { 2553 u64 dev_flags = dev->flags; 2554 int err = 0; 2555 2556 if (reset_flow) { 2557 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 2558 GFP_KERNEL); 2559 if (!dev->dev_vfs) 2560 goto free_mem; 2561 return dev_flags; 2562 } 2563 2564 atomic_inc(&pf_loading); 2565 if (dev->flags & MLX4_FLAG_SRIOV) { 2566 if (existing_vfs != total_vfs) { 2567 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 2568 existing_vfs, total_vfs); 2569 total_vfs = existing_vfs; 2570 } 2571 } 2572 2573 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 2574 if (NULL == dev->dev_vfs) { 2575 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2576 goto disable_sriov; 2577 } 2578 2579 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 2580 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 2581 err = pci_enable_sriov(pdev, total_vfs); 2582 } 2583 if (err) { 2584 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 2585 err); 2586 goto disable_sriov; 2587 } else { 2588 mlx4_warn(dev, "Running in master mode\n"); 2589 dev_flags |= MLX4_FLAG_SRIOV | 2590 MLX4_FLAG_MASTER; 2591 dev_flags &= ~MLX4_FLAG_SLAVE; 2592 dev->persist->num_vfs = total_vfs; 2593 } 2594 return dev_flags; 2595 2596 disable_sriov: 2597 atomic_dec(&pf_loading); 2598 free_mem: 2599 dev->persist->num_vfs = 0; 2600 kfree(dev->dev_vfs); 2601 return dev_flags & ~MLX4_FLAG_MASTER; 2602 } 2603 2604 enum { 2605 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 2606 }; 2607 2608 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 2609 int *nvfs) 2610 { 2611 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 2612 /* Checking for 64 VFs as a limitation of CX2 */ 2613 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 2614 requested_vfs >= 64) { 2615 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 2616 requested_vfs); 2617 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 2618 } 2619 return 0; 2620 } 2621 2622 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2623 int total_vfs, int *nvfs, struct mlx4_priv *priv, 2624 int reset_flow) 2625 { 2626 struct mlx4_dev *dev; 2627 unsigned sum = 0; 2628 int err; 2629 int port; 2630 int i; 2631 struct mlx4_dev_cap *dev_cap = NULL; 2632 int existing_vfs = 0; 2633 2634 dev = &priv->dev; 2635 2636 INIT_LIST_HEAD(&priv->ctx_list); 2637 spin_lock_init(&priv->ctx_lock); 2638 2639 mutex_init(&priv->port_mutex); 2640 2641 INIT_LIST_HEAD(&priv->pgdir_list); 2642 mutex_init(&priv->pgdir_mutex); 2643 2644 INIT_LIST_HEAD(&priv->bf_list); 2645 mutex_init(&priv->bf_mutex); 2646 2647 dev->rev_id = pdev->revision; 2648 dev->numa_node = dev_to_node(&pdev->dev); 2649 2650 /* Detect if this device is a virtual function */ 2651 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2652 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2653 dev->flags |= MLX4_FLAG_SLAVE; 2654 } else { 2655 /* We reset the device and enable SRIOV only for physical 2656 * devices. Try to claim ownership on the device; 2657 * if already taken, skip -- do not allow multiple PFs */ 2658 err = mlx4_get_ownership(dev); 2659 if (err) { 2660 if (err < 0) 2661 return err; 2662 else { 2663 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 2664 return -EINVAL; 2665 } 2666 } 2667 2668 atomic_set(&priv->opreq_count, 0); 2669 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 2670 2671 /* 2672 * Now reset the HCA before we touch the PCI capabilities or 2673 * attempt a firmware command, since a boot ROM may have left 2674 * the HCA in an undefined state. 2675 */ 2676 err = mlx4_reset(dev); 2677 if (err) { 2678 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 2679 goto err_sriov; 2680 } 2681 2682 if (total_vfs) { 2683 dev->flags = MLX4_FLAG_MASTER; 2684 existing_vfs = pci_num_vf(pdev); 2685 if (existing_vfs) 2686 dev->flags |= MLX4_FLAG_SRIOV; 2687 dev->persist->num_vfs = total_vfs; 2688 } 2689 } 2690 2691 /* on load remove any previous indication of internal error, 2692 * device is up. 2693 */ 2694 dev->persist->state = MLX4_DEVICE_STATE_UP; 2695 2696 slave_start: 2697 err = mlx4_cmd_init(dev); 2698 if (err) { 2699 mlx4_err(dev, "Failed to init command interface, aborting\n"); 2700 goto err_sriov; 2701 } 2702 2703 /* In slave functions, the communication channel must be initialized 2704 * before posting commands. Also, init num_slaves before calling 2705 * mlx4_init_hca */ 2706 if (mlx4_is_mfunc(dev)) { 2707 if (mlx4_is_master(dev)) { 2708 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 2709 2710 } else { 2711 dev->num_slaves = 0; 2712 err = mlx4_multi_func_init(dev); 2713 if (err) { 2714 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 2715 goto err_cmd; 2716 } 2717 } 2718 } 2719 2720 err = mlx4_init_fw(dev); 2721 if (err) { 2722 mlx4_err(dev, "Failed to init fw, aborting.\n"); 2723 goto err_mfunc; 2724 } 2725 2726 if (mlx4_is_master(dev)) { 2727 /* when we hit the goto slave_start below, dev_cap already initialized */ 2728 if (!dev_cap) { 2729 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 2730 2731 if (!dev_cap) { 2732 err = -ENOMEM; 2733 goto err_fw; 2734 } 2735 2736 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2737 if (err) { 2738 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2739 goto err_fw; 2740 } 2741 2742 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2743 goto err_fw; 2744 2745 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2746 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 2747 total_vfs, 2748 existing_vfs, 2749 reset_flow); 2750 2751 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2752 dev->flags = dev_flags; 2753 if (!SRIOV_VALID_STATE(dev->flags)) { 2754 mlx4_err(dev, "Invalid SRIOV state\n"); 2755 goto err_sriov; 2756 } 2757 err = mlx4_reset(dev); 2758 if (err) { 2759 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2760 goto err_sriov; 2761 } 2762 goto slave_start; 2763 } 2764 } else { 2765 /* Legacy mode FW requires SRIOV to be enabled before 2766 * doing QUERY_DEV_CAP, since max_eq's value is different if 2767 * SRIOV is enabled. 2768 */ 2769 memset(dev_cap, 0, sizeof(*dev_cap)); 2770 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2771 if (err) { 2772 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2773 goto err_fw; 2774 } 2775 2776 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2777 goto err_fw; 2778 } 2779 } 2780 2781 err = mlx4_init_hca(dev); 2782 if (err) { 2783 if (err == -EACCES) { 2784 /* Not primary Physical function 2785 * Running in slave mode */ 2786 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2787 /* We're not a PF */ 2788 if (dev->flags & MLX4_FLAG_SRIOV) { 2789 if (!existing_vfs) 2790 pci_disable_sriov(pdev); 2791 if (mlx4_is_master(dev) && !reset_flow) 2792 atomic_dec(&pf_loading); 2793 dev->flags &= ~MLX4_FLAG_SRIOV; 2794 } 2795 if (!mlx4_is_slave(dev)) 2796 mlx4_free_ownership(dev); 2797 dev->flags |= MLX4_FLAG_SLAVE; 2798 dev->flags &= ~MLX4_FLAG_MASTER; 2799 goto slave_start; 2800 } else 2801 goto err_fw; 2802 } 2803 2804 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2805 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 2806 existing_vfs, reset_flow); 2807 2808 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 2809 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 2810 dev->flags = dev_flags; 2811 err = mlx4_cmd_init(dev); 2812 if (err) { 2813 /* Only VHCR is cleaned up, so could still 2814 * send FW commands 2815 */ 2816 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 2817 goto err_close; 2818 } 2819 } else { 2820 dev->flags = dev_flags; 2821 } 2822 2823 if (!SRIOV_VALID_STATE(dev->flags)) { 2824 mlx4_err(dev, "Invalid SRIOV state\n"); 2825 goto err_close; 2826 } 2827 } 2828 2829 /* check if the device is functioning at its maximum possible speed. 2830 * No return code for this call, just warn the user in case of PCI 2831 * express device capabilities are under-satisfied by the bus. 2832 */ 2833 if (!mlx4_is_slave(dev)) 2834 mlx4_check_pcie_caps(dev); 2835 2836 /* In master functions, the communication channel must be initialized 2837 * after obtaining its address from fw */ 2838 if (mlx4_is_master(dev)) { 2839 int ib_ports = 0; 2840 2841 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2842 ib_ports++; 2843 2844 if (ib_ports && 2845 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2846 mlx4_err(dev, 2847 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); 2848 err = -EINVAL; 2849 goto err_close; 2850 } 2851 if (dev->caps.num_ports < 2 && 2852 num_vfs_argc > 1) { 2853 err = -EINVAL; 2854 mlx4_err(dev, 2855 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 2856 dev->caps.num_ports); 2857 goto err_close; 2858 } 2859 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 2860 2861 for (i = 0; 2862 i < sizeof(dev->persist->nvfs)/ 2863 sizeof(dev->persist->nvfs[0]); i++) { 2864 unsigned j; 2865 2866 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 2867 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 2868 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 2869 dev->caps.num_ports; 2870 } 2871 } 2872 2873 /* In master functions, the communication channel 2874 * must be initialized after obtaining its address from fw 2875 */ 2876 err = mlx4_multi_func_init(dev); 2877 if (err) { 2878 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 2879 goto err_close; 2880 } 2881 } 2882 2883 err = mlx4_alloc_eq_table(dev); 2884 if (err) 2885 goto err_master_mfunc; 2886 2887 priv->msix_ctl.pool_bm = 0; 2888 mutex_init(&priv->msix_ctl.pool_lock); 2889 2890 mlx4_enable_msi_x(dev); 2891 if ((mlx4_is_mfunc(dev)) && 2892 !(dev->flags & MLX4_FLAG_MSI_X)) { 2893 err = -ENOSYS; 2894 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 2895 goto err_free_eq; 2896 } 2897 2898 if (!mlx4_is_slave(dev)) { 2899 err = mlx4_init_steering(dev); 2900 if (err) 2901 goto err_disable_msix; 2902 } 2903 2904 err = mlx4_setup_hca(dev); 2905 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 2906 !mlx4_is_mfunc(dev)) { 2907 dev->flags &= ~MLX4_FLAG_MSI_X; 2908 dev->caps.num_comp_vectors = 1; 2909 dev->caps.comp_pool = 0; 2910 pci_disable_msix(pdev); 2911 err = mlx4_setup_hca(dev); 2912 } 2913 2914 if (err) 2915 goto err_steer; 2916 2917 mlx4_init_quotas(dev); 2918 /* When PF resources are ready arm its comm channel to enable 2919 * getting commands 2920 */ 2921 if (mlx4_is_master(dev)) { 2922 err = mlx4_ARM_COMM_CHANNEL(dev); 2923 if (err) { 2924 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 2925 err); 2926 goto err_steer; 2927 } 2928 } 2929 2930 for (port = 1; port <= dev->caps.num_ports; port++) { 2931 err = mlx4_init_port_info(dev, port); 2932 if (err) 2933 goto err_port; 2934 } 2935 2936 err = mlx4_register_device(dev); 2937 if (err) 2938 goto err_port; 2939 2940 mlx4_request_modules(dev); 2941 2942 mlx4_sense_init(dev); 2943 mlx4_start_sense(dev); 2944 2945 priv->removed = 0; 2946 2947 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 2948 atomic_dec(&pf_loading); 2949 2950 kfree(dev_cap); 2951 return 0; 2952 2953 err_port: 2954 for (--port; port >= 1; --port) 2955 mlx4_cleanup_port_info(&priv->port[port]); 2956 2957 mlx4_cleanup_counters_table(dev); 2958 mlx4_cleanup_qp_table(dev); 2959 mlx4_cleanup_srq_table(dev); 2960 mlx4_cleanup_cq_table(dev); 2961 mlx4_cmd_use_polling(dev); 2962 mlx4_cleanup_eq_table(dev); 2963 mlx4_cleanup_mcg_table(dev); 2964 mlx4_cleanup_mr_table(dev); 2965 mlx4_cleanup_xrcd_table(dev); 2966 mlx4_cleanup_pd_table(dev); 2967 mlx4_cleanup_uar_table(dev); 2968 2969 err_steer: 2970 if (!mlx4_is_slave(dev)) 2971 mlx4_clear_steering(dev); 2972 2973 err_disable_msix: 2974 if (dev->flags & MLX4_FLAG_MSI_X) 2975 pci_disable_msix(pdev); 2976 2977 err_free_eq: 2978 mlx4_free_eq_table(dev); 2979 2980 err_master_mfunc: 2981 if (mlx4_is_master(dev)) 2982 mlx4_multi_func_cleanup(dev); 2983 2984 if (mlx4_is_slave(dev)) { 2985 kfree(dev->caps.qp0_qkey); 2986 kfree(dev->caps.qp0_tunnel); 2987 kfree(dev->caps.qp0_proxy); 2988 kfree(dev->caps.qp1_tunnel); 2989 kfree(dev->caps.qp1_proxy); 2990 } 2991 2992 err_close: 2993 mlx4_close_hca(dev); 2994 2995 err_fw: 2996 mlx4_close_fw(dev); 2997 2998 err_mfunc: 2999 if (mlx4_is_slave(dev)) 3000 mlx4_multi_func_cleanup(dev); 3001 3002 err_cmd: 3003 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3004 3005 err_sriov: 3006 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3007 pci_disable_sriov(pdev); 3008 dev->flags &= ~MLX4_FLAG_SRIOV; 3009 } 3010 3011 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3012 atomic_dec(&pf_loading); 3013 3014 kfree(priv->dev.dev_vfs); 3015 3016 if (!mlx4_is_slave(dev)) 3017 mlx4_free_ownership(dev); 3018 3019 kfree(dev_cap); 3020 return err; 3021 } 3022 3023 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3024 struct mlx4_priv *priv) 3025 { 3026 int err; 3027 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3028 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3029 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3030 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3031 unsigned total_vfs = 0; 3032 unsigned int i; 3033 3034 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3035 3036 err = pci_enable_device(pdev); 3037 if (err) { 3038 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3039 return err; 3040 } 3041 3042 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 3043 * per port, we must limit the number of VFs to 63 (since their are 3044 * 128 MACs) 3045 */ 3046 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3047 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3048 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3049 if (nvfs[i] < 0) { 3050 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3051 err = -EINVAL; 3052 goto err_disable_pdev; 3053 } 3054 } 3055 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3056 i++) { 3057 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3058 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3059 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3060 err = -EINVAL; 3061 goto err_disable_pdev; 3062 } 3063 } 3064 if (total_vfs >= MLX4_MAX_NUM_VF) { 3065 dev_err(&pdev->dev, 3066 "Requested more VF's (%d) than allowed (%d)\n", 3067 total_vfs, MLX4_MAX_NUM_VF - 1); 3068 err = -EINVAL; 3069 goto err_disable_pdev; 3070 } 3071 3072 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3073 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { 3074 dev_err(&pdev->dev, 3075 "Requested more VF's (%d) for port (%d) than allowed (%d)\n", 3076 nvfs[i] + nvfs[2], i + 1, 3077 MLX4_MAX_NUM_VF_P_PORT - 1); 3078 err = -EINVAL; 3079 goto err_disable_pdev; 3080 } 3081 } 3082 3083 /* Check for BARs. */ 3084 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3085 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3086 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3087 pci_dev_data, pci_resource_flags(pdev, 0)); 3088 err = -ENODEV; 3089 goto err_disable_pdev; 3090 } 3091 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3092 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3093 err = -ENODEV; 3094 goto err_disable_pdev; 3095 } 3096 3097 err = pci_request_regions(pdev, DRV_NAME); 3098 if (err) { 3099 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3100 goto err_disable_pdev; 3101 } 3102 3103 pci_set_master(pdev); 3104 3105 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3106 if (err) { 3107 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3108 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3109 if (err) { 3110 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3111 goto err_release_regions; 3112 } 3113 } 3114 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3115 if (err) { 3116 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3117 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3118 if (err) { 3119 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3120 goto err_release_regions; 3121 } 3122 } 3123 3124 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3125 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3126 /* Detect if this device is a virtual function */ 3127 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3128 /* When acting as pf, we normally skip vfs unless explicitly 3129 * requested to probe them. 3130 */ 3131 if (total_vfs) { 3132 unsigned vfs_offset = 0; 3133 3134 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3135 vfs_offset + nvfs[i] < extended_func_num(pdev); 3136 vfs_offset += nvfs[i], i++) 3137 ; 3138 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3139 err = -ENODEV; 3140 goto err_release_regions; 3141 } 3142 if ((extended_func_num(pdev) - vfs_offset) 3143 > prb_vf[i]) { 3144 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3145 extended_func_num(pdev)); 3146 err = -ENODEV; 3147 goto err_release_regions; 3148 } 3149 } 3150 } 3151 3152 err = mlx4_catas_init(&priv->dev); 3153 if (err) 3154 goto err_release_regions; 3155 3156 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3157 if (err) 3158 goto err_catas; 3159 3160 return 0; 3161 3162 err_catas: 3163 mlx4_catas_end(&priv->dev); 3164 3165 err_release_regions: 3166 pci_release_regions(pdev); 3167 3168 err_disable_pdev: 3169 pci_disable_device(pdev); 3170 pci_set_drvdata(pdev, NULL); 3171 return err; 3172 } 3173 3174 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3175 { 3176 struct mlx4_priv *priv; 3177 struct mlx4_dev *dev; 3178 int ret; 3179 3180 printk_once(KERN_INFO "%s", mlx4_version); 3181 3182 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3183 if (!priv) 3184 return -ENOMEM; 3185 3186 dev = &priv->dev; 3187 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3188 if (!dev->persist) { 3189 kfree(priv); 3190 return -ENOMEM; 3191 } 3192 dev->persist->pdev = pdev; 3193 dev->persist->dev = dev; 3194 pci_set_drvdata(pdev, dev->persist); 3195 priv->pci_dev_data = id->driver_data; 3196 mutex_init(&dev->persist->device_state_mutex); 3197 mutex_init(&dev->persist->interface_state_mutex); 3198 3199 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3200 if (ret) { 3201 kfree(dev->persist); 3202 kfree(priv); 3203 } else { 3204 pci_save_state(pdev); 3205 } 3206 3207 return ret; 3208 } 3209 3210 static void mlx4_clean_dev(struct mlx4_dev *dev) 3211 { 3212 struct mlx4_dev_persistent *persist = dev->persist; 3213 struct mlx4_priv *priv = mlx4_priv(dev); 3214 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3215 3216 memset(priv, 0, sizeof(*priv)); 3217 priv->dev.persist = persist; 3218 priv->dev.flags = flags; 3219 } 3220 3221 static void mlx4_unload_one(struct pci_dev *pdev) 3222 { 3223 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3224 struct mlx4_dev *dev = persist->dev; 3225 struct mlx4_priv *priv = mlx4_priv(dev); 3226 int pci_dev_data; 3227 int p, i; 3228 3229 if (priv->removed) 3230 return; 3231 3232 /* saving current ports type for further use */ 3233 for (i = 0; i < dev->caps.num_ports; i++) { 3234 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3235 dev->persist->curr_port_poss_type[i] = dev->caps. 3236 possible_type[i + 1]; 3237 } 3238 3239 pci_dev_data = priv->pci_dev_data; 3240 3241 mlx4_stop_sense(dev); 3242 mlx4_unregister_device(dev); 3243 3244 for (p = 1; p <= dev->caps.num_ports; p++) { 3245 mlx4_cleanup_port_info(&priv->port[p]); 3246 mlx4_CLOSE_PORT(dev, p); 3247 } 3248 3249 if (mlx4_is_master(dev)) 3250 mlx4_free_resource_tracker(dev, 3251 RES_TR_FREE_SLAVES_ONLY); 3252 3253 mlx4_cleanup_counters_table(dev); 3254 mlx4_cleanup_qp_table(dev); 3255 mlx4_cleanup_srq_table(dev); 3256 mlx4_cleanup_cq_table(dev); 3257 mlx4_cmd_use_polling(dev); 3258 mlx4_cleanup_eq_table(dev); 3259 mlx4_cleanup_mcg_table(dev); 3260 mlx4_cleanup_mr_table(dev); 3261 mlx4_cleanup_xrcd_table(dev); 3262 mlx4_cleanup_pd_table(dev); 3263 3264 if (mlx4_is_master(dev)) 3265 mlx4_free_resource_tracker(dev, 3266 RES_TR_FREE_STRUCTS_ONLY); 3267 3268 iounmap(priv->kar); 3269 mlx4_uar_free(dev, &priv->driver_uar); 3270 mlx4_cleanup_uar_table(dev); 3271 if (!mlx4_is_slave(dev)) 3272 mlx4_clear_steering(dev); 3273 mlx4_free_eq_table(dev); 3274 if (mlx4_is_master(dev)) 3275 mlx4_multi_func_cleanup(dev); 3276 mlx4_close_hca(dev); 3277 mlx4_close_fw(dev); 3278 if (mlx4_is_slave(dev)) 3279 mlx4_multi_func_cleanup(dev); 3280 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3281 3282 if (dev->flags & MLX4_FLAG_MSI_X) 3283 pci_disable_msix(pdev); 3284 3285 if (!mlx4_is_slave(dev)) 3286 mlx4_free_ownership(dev); 3287 3288 kfree(dev->caps.qp0_qkey); 3289 kfree(dev->caps.qp0_tunnel); 3290 kfree(dev->caps.qp0_proxy); 3291 kfree(dev->caps.qp1_tunnel); 3292 kfree(dev->caps.qp1_proxy); 3293 kfree(dev->dev_vfs); 3294 3295 mlx4_clean_dev(dev); 3296 priv->pci_dev_data = pci_dev_data; 3297 priv->removed = 1; 3298 } 3299 3300 static void mlx4_remove_one(struct pci_dev *pdev) 3301 { 3302 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3303 struct mlx4_dev *dev = persist->dev; 3304 struct mlx4_priv *priv = mlx4_priv(dev); 3305 int active_vfs = 0; 3306 3307 mutex_lock(&persist->interface_state_mutex); 3308 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3309 mutex_unlock(&persist->interface_state_mutex); 3310 3311 /* Disabling SR-IOV is not allowed while there are active vf's */ 3312 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3313 active_vfs = mlx4_how_many_lives_vf(dev); 3314 if (active_vfs) { 3315 pr_warn("Removing PF when there are active VF's !!\n"); 3316 pr_warn("Will not disable SR-IOV.\n"); 3317 } 3318 } 3319 3320 /* device marked to be under deletion running now without the lock 3321 * letting other tasks to be terminated 3322 */ 3323 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3324 mlx4_unload_one(pdev); 3325 else 3326 mlx4_info(dev, "%s: interface is down\n", __func__); 3327 mlx4_catas_end(dev); 3328 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 3329 mlx4_warn(dev, "Disabling SR-IOV\n"); 3330 pci_disable_sriov(pdev); 3331 } 3332 3333 pci_release_regions(pdev); 3334 pci_disable_device(pdev); 3335 kfree(dev->persist); 3336 kfree(priv); 3337 pci_set_drvdata(pdev, NULL); 3338 } 3339 3340 static int restore_current_port_types(struct mlx4_dev *dev, 3341 enum mlx4_port_type *types, 3342 enum mlx4_port_type *poss_types) 3343 { 3344 struct mlx4_priv *priv = mlx4_priv(dev); 3345 int err, i; 3346 3347 mlx4_stop_sense(dev); 3348 3349 mutex_lock(&priv->port_mutex); 3350 for (i = 0; i < dev->caps.num_ports; i++) 3351 dev->caps.possible_type[i + 1] = poss_types[i]; 3352 err = mlx4_change_port_types(dev, types); 3353 mlx4_start_sense(dev); 3354 mutex_unlock(&priv->port_mutex); 3355 3356 return err; 3357 } 3358 3359 int mlx4_restart_one(struct pci_dev *pdev) 3360 { 3361 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3362 struct mlx4_dev *dev = persist->dev; 3363 struct mlx4_priv *priv = mlx4_priv(dev); 3364 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3365 int pci_dev_data, err, total_vfs; 3366 3367 pci_dev_data = priv->pci_dev_data; 3368 total_vfs = dev->persist->num_vfs; 3369 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3370 3371 mlx4_unload_one(pdev); 3372 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); 3373 if (err) { 3374 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3375 __func__, pci_name(pdev), err); 3376 return err; 3377 } 3378 3379 err = restore_current_port_types(dev, dev->persist->curr_port_type, 3380 dev->persist->curr_port_poss_type); 3381 if (err) 3382 mlx4_err(dev, "could not restore original port types (%d)\n", 3383 err); 3384 3385 return err; 3386 } 3387 3388 static const struct pci_device_id mlx4_pci_table[] = { 3389 /* MT25408 "Hermon" SDR */ 3390 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3391 /* MT25408 "Hermon" DDR */ 3392 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3393 /* MT25408 "Hermon" QDR */ 3394 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3395 /* MT25408 "Hermon" DDR PCIe gen2 */ 3396 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3397 /* MT25408 "Hermon" QDR PCIe gen2 */ 3398 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3399 /* MT25408 "Hermon" EN 10GigE */ 3400 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3401 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 3402 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3403 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 3404 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3405 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 3406 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3407 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 3408 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3409 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 3410 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3411 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 3412 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3413 /* MT25400 Family [ConnectX-2 Virtual Function] */ 3414 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, 3415 /* MT27500 Family [ConnectX-3] */ 3416 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 3417 /* MT27500 Family [ConnectX-3 Virtual Function] */ 3418 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, 3419 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 3420 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 3421 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 3422 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 3423 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 3424 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 3425 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 3426 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 3427 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 3428 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 3429 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 3430 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 3431 { 0, } 3432 }; 3433 3434 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 3435 3436 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3437 pci_channel_state_t state) 3438 { 3439 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3440 3441 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 3442 mlx4_enter_error_state(persist); 3443 3444 mutex_lock(&persist->interface_state_mutex); 3445 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3446 mlx4_unload_one(pdev); 3447 3448 mutex_unlock(&persist->interface_state_mutex); 3449 if (state == pci_channel_io_perm_failure) 3450 return PCI_ERS_RESULT_DISCONNECT; 3451 3452 pci_disable_device(pdev); 3453 return PCI_ERS_RESULT_NEED_RESET; 3454 } 3455 3456 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3457 { 3458 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3459 struct mlx4_dev *dev = persist->dev; 3460 struct mlx4_priv *priv = mlx4_priv(dev); 3461 int ret; 3462 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3463 int total_vfs; 3464 3465 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 3466 ret = pci_enable_device(pdev); 3467 if (ret) { 3468 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 3469 return PCI_ERS_RESULT_DISCONNECT; 3470 } 3471 3472 pci_set_master(pdev); 3473 pci_restore_state(pdev); 3474 pci_save_state(pdev); 3475 3476 total_vfs = dev->persist->num_vfs; 3477 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3478 3479 mutex_lock(&persist->interface_state_mutex); 3480 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 3481 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 3482 priv, 1); 3483 if (ret) { 3484 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 3485 __func__, ret); 3486 goto end; 3487 } 3488 3489 ret = restore_current_port_types(dev, dev->persist-> 3490 curr_port_type, dev->persist-> 3491 curr_port_poss_type); 3492 if (ret) 3493 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 3494 } 3495 end: 3496 mutex_unlock(&persist->interface_state_mutex); 3497 3498 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3499 } 3500 3501 static void mlx4_shutdown(struct pci_dev *pdev) 3502 { 3503 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3504 3505 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 3506 mutex_lock(&persist->interface_state_mutex); 3507 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3508 mlx4_unload_one(pdev); 3509 mutex_unlock(&persist->interface_state_mutex); 3510 } 3511 3512 static const struct pci_error_handlers mlx4_err_handler = { 3513 .error_detected = mlx4_pci_err_detected, 3514 .slot_reset = mlx4_pci_slot_reset, 3515 }; 3516 3517 static struct pci_driver mlx4_driver = { 3518 .name = DRV_NAME, 3519 .id_table = mlx4_pci_table, 3520 .probe = mlx4_init_one, 3521 .shutdown = mlx4_shutdown, 3522 .remove = mlx4_remove_one, 3523 .err_handler = &mlx4_err_handler, 3524 }; 3525 3526 static int __init mlx4_verify_params(void) 3527 { 3528 if ((log_num_mac < 0) || (log_num_mac > 7)) { 3529 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 3530 return -1; 3531 } 3532 3533 if (log_num_vlan != 0) 3534 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 3535 MLX4_LOG_NUM_VLANS); 3536 3537 if (use_prio != 0) 3538 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 3539 3540 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 3541 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 3542 log_mtts_per_seg); 3543 return -1; 3544 } 3545 3546 /* Check if module param for ports type has legal combination */ 3547 if (port_type_array[0] == false && port_type_array[1] == true) { 3548 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 3549 port_type_array[0] = true; 3550 } 3551 3552 if (mlx4_log_num_mgm_entry_size < -7 || 3553 (mlx4_log_num_mgm_entry_size > 0 && 3554 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 3555 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 3556 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 3557 mlx4_log_num_mgm_entry_size, 3558 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 3559 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 3560 return -1; 3561 } 3562 3563 return 0; 3564 } 3565 3566 static int __init mlx4_init(void) 3567 { 3568 int ret; 3569 3570 if (mlx4_verify_params()) 3571 return -EINVAL; 3572 3573 3574 mlx4_wq = create_singlethread_workqueue("mlx4"); 3575 if (!mlx4_wq) 3576 return -ENOMEM; 3577 3578 ret = pci_register_driver(&mlx4_driver); 3579 if (ret < 0) 3580 destroy_workqueue(mlx4_wq); 3581 return ret < 0 ? ret : 0; 3582 } 3583 3584 static void __exit mlx4_cleanup(void) 3585 { 3586 pci_unregister_driver(&mlx4_driver); 3587 destroy_workqueue(mlx4_wq); 3588 } 3589 3590 module_init(mlx4_init); 3591 module_exit(mlx4_cleanup); 3592