/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
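
/*
 * With non-device-managed steering, an MGM entry of
 * 2^log_num_mgm_entry_size bytes holds
 * 4 * (2^log_num_mgm_entry_size / 16 - 2) QPs (see the formula in
 * slave_adjust_steering_mode() below), so the example in the parameter
 * description, log size 10, works out to 4 * (1024 / 16 - 2) = 248 QPs
 * per multicast group.
 */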
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};
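
/*
 * Query per-function limits from firmware.  When the HCA exposes system
 * EQs (MLX4_DEV_CAP_FLAG2_SYS_EQS), the max/reserved EQ and reserved UAR
 * counts in @dev_cap are overridden with the per-function values and
 * MLX4_QUERY_FUNC_NUM_SYS_EQS is set in the return value.  Returns a
 * negative errno on command failure.
 */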
static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride.
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs.
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride, cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
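
/*
 * Read the device capabilities via QUERY_DEV_CAP and translate them into
 * the driver's operating limits in dev->caps: port types, QP/CQ/SRQ/MR
 * sizing, reserved resource counts and steering-related QPN ranges.
 * Fails with -ENODEV when the HCA's minimum page size, port count or UAR
 * area do not fit this kernel/PCI configuration.
 */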
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
				   dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
			      dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;
	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}

/*
 * Compare the PCIe link the device actually trained (the minimum
 * speed/width along the bus chain) with what the device is capable of,
 * and warn when the slot is the bottleneck.  Purely informational.
 */
static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}

/* Check whether any VFs are still active; return how many are */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/* the PPF is 0 */; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.reserved_lkey = func_cap.reserved_lkey;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}
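
	/* The PF dictates the special QP numbers (QP0/QP1 proxy and tunnel
	 * QPs) and the QP0 qkeys; allocate the per-port arrays that the
	 * QUERY_FUNC_CAP loop below fills in.
	 */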
	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still need to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}
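
/*
 * Request the protocol drivers that match the configured port types:
 * mlx4_en for Ethernet ports and mlx4_ib for IB ports (mlx4_ib is also
 * loaded for IBoE-capable devices).  Non-blocking; module probing
 * happens asynchronously.
 */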
static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
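
/*
 * sysfs 'port_type' store handler: accepts "ib", "eth" or "auto",
 * validates the resulting configuration across both ports and applies it
 * via mlx4_change_port_types().  Serialized by set_port_type_mutex and
 * the port mutex.
 */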
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	static DEFINE_MUTEX(set_port_type_mutex);
	int i;
	int err = 0;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}
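
/*
 * Map both virtual ports onto one physical port (HA bonding).  The
 * actual work is done by mlx4_do_bond(); bond_mutex keeps bond/unbond
 * and port-map changes mutually exclusive.
 */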
int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev))
		ret = mlx4_do_bond(dev, true);
	else
		ret = 0;

	mutex_unlock(&priv->bond_mutex);
	if (ret)
		mlx4_err(dev, "Failed to bond device: %d\n", ret);
	else
		mlx4_dbg(dev, "Device is bonded\n");
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);

int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev))
		ret = mlx4_do_bond(dev, false);

	mutex_unlock(&priv->bond_mutex);
	if (ret)
		mlx4_err(dev, "Failed to unbond device: %d\n", ret);
	else
		mlx4_dbg(dev, "Device is unbonded\n");
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);

int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides the boundary checks, cross mapping makes
		 * no sense and therefore is not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_port_map_set);

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
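
/*
 * Map the central MPT (cMPT) ICM regions.  The cMPT area holds one
 * region per context type (QP, SRQ, CQ, EQ); each region starts at
 * cmpt_base + (type * cmpt_entry_sz << MLX4_CMPT_SHIFT) and is sized by
 * the corresponding dev->caps count.
 */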
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}
(The variable 1393 * dev->caps.mtt_entry_sz below is really the MTT segment 1394 * size, not the raw entry size) 1395 */ 1396 dev->caps.reserved_mtts = 1397 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 1398 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 1399 1400 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 1401 init_hca->mtt_base, 1402 dev->caps.mtt_entry_sz, 1403 dev->caps.num_mtts, 1404 dev->caps.reserved_mtts, 1, 0); 1405 if (err) { 1406 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); 1407 goto err_unmap_eq; 1408 } 1409 1410 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 1411 init_hca->dmpt_base, 1412 dev_cap->dmpt_entry_sz, 1413 dev->caps.num_mpts, 1414 dev->caps.reserved_mrws, 1, 1); 1415 if (err) { 1416 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); 1417 goto err_unmap_mtt; 1418 } 1419 1420 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1421 init_hca->qpc_base, 1422 dev_cap->qpc_entry_sz, 1423 dev->caps.num_qps, 1424 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1425 0, 0); 1426 if (err) { 1427 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); 1428 goto err_unmap_dmpt; 1429 } 1430 1431 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1432 init_hca->auxc_base, 1433 dev_cap->aux_entry_sz, 1434 dev->caps.num_qps, 1435 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1436 0, 0); 1437 if (err) { 1438 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); 1439 goto err_unmap_qp; 1440 } 1441 1442 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1443 init_hca->altc_base, 1444 dev_cap->altc_entry_sz, 1445 dev->caps.num_qps, 1446 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1447 0, 0); 1448 if (err) { 1449 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); 1450 goto err_unmap_auxc; 1451 } 1452 1453 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1454 init_hca->rdmarc_base, 1455 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1456 dev->caps.num_qps, 1457 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1458 0, 0); 1459 if (err) { 1460 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1461 goto err_unmap_altc; 1462 } 1463 1464 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1465 init_hca->cqc_base, 1466 dev_cap->cqc_entry_sz, 1467 dev->caps.num_cqs, 1468 dev->caps.reserved_cqs, 0, 0); 1469 if (err) { 1470 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); 1471 goto err_unmap_rdmarc; 1472 } 1473 1474 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1475 init_hca->srqc_base, 1476 dev_cap->srq_entry_sz, 1477 dev->caps.num_srqs, 1478 dev->caps.reserved_srqs, 0, 0); 1479 if (err) { 1480 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); 1481 goto err_unmap_cq; 1482 } 1483 1484 /* 1485 * For flow steering device managed mode it is required to use 1486 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1487 * required, but for simplicity just map the whole multicast 1488 * group table now. The table isn't very big and it's a lot 1489 * easier than trying to track ref counts. 
	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
			  MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
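
/*
 * Map the BlueFlame send area: the part of BAR 2 that lies beyond the
 * UAR pages, mapped write-combining so send descriptors can be written
 * directly to the device.  Returns -ENXIO when BlueFlame is not
 * supported (bf_reg_size == 0).
 */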
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->persist->pdev, 2) +
		   (dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->persist->pdev, 2) -
		 (dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* re-read until the high word is stable across the low-word read */
	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);

static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->persist->pdev,
					   priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}
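
/*
 * Poll the comm-channel flags until the PF clears the OFFLINE bit or
 * MLX4_COMM_OFFLINE_TIME_OUT expires, sleeping 100 msec per iteration so
 * a PF that is still loading (e.g. during AER/reset) has time to come up.
 */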
static int mlx4_comm_check_offline(struct mlx4_dev *dev)
{
#define COMM_CHAN_OFFLINE_OFFSET  0x09

	u32 comm_flags;
	u32 offline_bit;
	unsigned long end;
	struct mlx4_priv *priv = mlx4_priv(dev);

	end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
	while (time_before(jiffies, end)) {
		comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
					  MLX4_COMM_CHAN_FLAGS));
		offline_bit = (comm_flags &
			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
		if (!offline_bit)
			return 0;
		/* There are cases as part of AER/Reset flow that PF needs
		 * around 100 msec to load. We therefore sleep for 100 msec
		 * to allow other tasks to make use of that CPU during this
		 * time interval.
		 */
		msleep(100);
	}
	mlx4_err(dev, "Communication channel is offline.\n");
	return -EIO;
}

static void mlx4_reset_vf_support(struct mlx4_dev *dev)
{
#define COMM_CHAN_RST_OFFSET 0x1e

	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 comm_rst;
	u32 comm_caps;

	comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
				 MLX4_COMM_CHAN_CAPS));
	comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));

	if (comm_rst)
		dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	if (mlx4_comm_check_offline(dev)) {
		mlx4_err(dev, "PF is not responsive, skipping initialization\n");
		goto err_offline;
	}

	mlx4_reset_vf_support(dev);
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
err_offline:
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}
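
/*
 * Pick the smallest MGM log entry size whose entry can hold qp_per_entry
 * QPs, using the same capacity formula as non-device-managed steering:
 * capacity(i) = 4 * (2^i / 16 - 2).  Returns -1 if even
 * MLX4_MAX_MGM_LOG_ENTRY_SIZE is too small.
 */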
2)) 1808 break; 1809 } 1810 1811 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 1812 } 1813 1814 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 1815 { 1816 switch (dmfs_high_steer_mode) { 1817 case MLX4_STEERING_DMFS_A0_DEFAULT: 1818 return "default performance"; 1819 1820 case MLX4_STEERING_DMFS_A0_DYNAMIC: 1821 return "dynamic hybrid mode"; 1822 1823 case MLX4_STEERING_DMFS_A0_STATIC: 1824 return "performance optimized for limited rule configuration (static)"; 1825 1826 case MLX4_STEERING_DMFS_A0_DISABLE: 1827 return "disabled performance optimized steering"; 1828 1829 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 1830 return "performance optimized steering not supported"; 1831 1832 default: 1833 return "Unrecognized mode"; 1834 } 1835 } 1836 1837 #define MLX4_DMFS_A0_STEERING (1UL << 2) 1838 1839 static void choose_steering_mode(struct mlx4_dev *dev, 1840 struct mlx4_dev_cap *dev_cap) 1841 { 1842 if (mlx4_log_num_mgm_entry_size <= 0) { 1843 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 1844 if (dev->caps.dmfs_high_steer_mode == 1845 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1846 mlx4_err(dev, "DMFS high rate mode not supported\n"); 1847 else 1848 dev->caps.dmfs_high_steer_mode = 1849 MLX4_STEERING_DMFS_A0_STATIC; 1850 } 1851 } 1852 1853 if (mlx4_log_num_mgm_entry_size <= 0 && 1854 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1855 (!mlx4_is_mfunc(dev) || 1856 (dev_cap->fs_max_num_qp_per_entry >= 1857 (dev->persist->num_vfs + 1))) && 1858 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1859 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1860 dev->oper_log_mgm_entry_size = 1861 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 1862 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 1863 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 1864 dev->caps.fs_log_max_ucast_qp_range_size = 1865 dev_cap->fs_log_max_ucast_qp_range_size; 1866 } else { 1867 if (dev->caps.dmfs_high_steer_mode != 1868 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1869 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 1870 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 1871 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1872 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 1873 else { 1874 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 1875 1876 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1877 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1878 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 1879 } 1880 dev->oper_log_mgm_entry_size = 1881 mlx4_log_num_mgm_entry_size > 0 ? 
1882 mlx4_log_num_mgm_entry_size : 1883 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1884 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1885 } 1886 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 1887 mlx4_steering_mode_str(dev->caps.steering_mode), 1888 dev->oper_log_mgm_entry_size, 1889 mlx4_log_num_mgm_entry_size); 1890 } 1891 1892 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 1893 struct mlx4_dev_cap *dev_cap) 1894 { 1895 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 1896 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 1897 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 1898 else 1899 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 1900 1901 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 1902 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 1903 } 1904 1905 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 1906 { 1907 int i; 1908 struct mlx4_port_cap port_cap; 1909 1910 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1911 return -EINVAL; 1912 1913 for (i = 1; i <= dev->caps.num_ports; i++) { 1914 if (mlx4_dev_port(dev, i, &port_cap)) { 1915 mlx4_err(dev, 1916 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); 1917 } else if ((dev->caps.dmfs_high_steer_mode != 1918 MLX4_STEERING_DMFS_A0_DEFAULT) && 1919 (port_cap.dmfs_optimized_state == 1920 !!(dev->caps.dmfs_high_steer_mode == 1921 MLX4_STEERING_DMFS_A0_DISABLE))) { 1922 mlx4_err(dev, 1923 "DMFS high rate steer mode differs: driver requested %s but it is %s in FW.\n", 1924 dmfs_high_rate_steering_mode_str( 1925 dev->caps.dmfs_high_steer_mode), 1926 (port_cap.dmfs_optimized_state ? 
1927 "enabled" : "disabled")); 1928 } 1929 } 1930 1931 return 0; 1932 } 1933 1934 static int mlx4_init_fw(struct mlx4_dev *dev) 1935 { 1936 struct mlx4_mod_stat_cfg mlx4_cfg; 1937 int err = 0; 1938 1939 if (!mlx4_is_slave(dev)) { 1940 err = mlx4_QUERY_FW(dev); 1941 if (err) { 1942 if (err == -EACCES) 1943 mlx4_info(dev, "non-primary physical function, skipping\n"); 1944 else 1945 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 1946 return err; 1947 } 1948 1949 err = mlx4_load_fw(dev); 1950 if (err) { 1951 mlx4_err(dev, "Failed to start FW, aborting\n"); 1952 return err; 1953 } 1954 1955 mlx4_cfg.log_pg_sz_m = 1; 1956 mlx4_cfg.log_pg_sz = 0; 1957 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1958 if (err) 1959 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1960 } 1961 1962 return err; 1963 } 1964 1965 static int mlx4_init_hca(struct mlx4_dev *dev) 1966 { 1967 struct mlx4_priv *priv = mlx4_priv(dev); 1968 struct mlx4_adapter adapter; 1969 struct mlx4_dev_cap dev_cap; 1970 struct mlx4_profile profile; 1971 struct mlx4_init_hca_param init_hca; 1972 u64 icm_size; 1973 struct mlx4_config_dev_params params; 1974 int err; 1975 1976 if (!mlx4_is_slave(dev)) { 1977 err = mlx4_dev_cap(dev, &dev_cap); 1978 if (err) { 1979 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 1980 return err; 1981 } 1982 1983 choose_steering_mode(dev, &dev_cap); 1984 choose_tunnel_offload_mode(dev, &dev_cap); 1985 1986 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 1987 mlx4_is_master(dev)) 1988 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 1989 1990 err = mlx4_get_phys_port_id(dev); 1991 if (err) 1992 mlx4_err(dev, "Fail to get physical port id\n"); 1993 1994 if (mlx4_is_master(dev)) 1995 mlx4_parav_master_pf_caps(dev); 1996 1997 if (mlx4_low_memory_profile()) { 1998 mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); 1999 profile = low_mem_profile; 2000 } else { 2001 profile = default_profile; 2002 } 2003 if (dev->caps.steering_mode == 2004 MLX4_STEERING_MODE_DEVICE_MANAGED) 2005 profile.num_mcg = MLX4_FS_NUM_MCG; 2006 2007 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2008 &init_hca); 2009 if ((long long) icm_size < 0) { 2010 err = icm_size; 2011 return err; 2012 } 2013 2014 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2015 2016 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2017 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2018 init_hca.mw_enabled = 0; 2019 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2020 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2021 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2022 2023 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2024 if (err) 2025 return err; 2026 2027 err = mlx4_INIT_HCA(dev, &init_hca); 2028 if (err) { 2029 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2030 goto err_free_icm; 2031 } 2032 2033 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2034 err = mlx4_query_func(dev, &dev_cap); 2035 if (err < 0) { 2036 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2037 goto err_close; 2038 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2039 dev->caps.num_eqs = dev_cap.max_eqs; 2040 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2041 dev->caps.reserved_uars = dev_cap.reserved_uars; 2042 } 2043 } 2044 2045 /* 2046 * If TS is supported by FW 2047 * read HCA frequency by QUERY_HCA command 2048 */ 2049 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2050 memset(&init_hca, 0, sizeof(init_hca)); 2051 err = mlx4_QUERY_HCA(dev, &init_hca); 2052 if (err) { 2053 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2054 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2055 } else { 2056 dev->caps.hca_core_clock = 2057 init_hca.hca_core_clock; 2058 } 2059 2060 /* In case we got HCA frequency 0 - disable timestamping 2061 * to avoid dividing by zero 2062 */ 2063 if (!dev->caps.hca_core_clock) { 2064 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2065 mlx4_err(dev, 2066 "HCA frequency is 0 - timestamping is not supported\n"); 2067 } else if (map_internal_clock(dev)) { 2068 /* 2069 * Map internal clock, 2070 * in case of failure disable timestamping 2071 */ 2072 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2073 mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); 2074 } 2075 } 2076 2077 if (dev->caps.dmfs_high_steer_mode != 2078 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2079 if (mlx4_validate_optimized_steering(dev)) 2080 mlx4_warn(dev, "Optimized steering validation failed\n"); 2081 2082 if (dev->caps.dmfs_high_steer_mode == 2083 MLX4_STEERING_DMFS_A0_DISABLE) { 2084 dev->caps.dmfs_high_rate_qpn_base = 2085 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2086 dev->caps.dmfs_high_rate_qpn_range = 2087 MLX4_A0_STEERING_TABLE_SIZE; 2088 } 2089 2090 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2091 dmfs_high_rate_steering_mode_str( 2092 dev->caps.dmfs_high_steer_mode)); 2093 } 2094 } else { 2095 err = mlx4_init_slave(dev); 2096 if (err) { 2097 if (err != -EPROBE_DEFER) 2098 mlx4_err(dev, "Failed to initialize slave\n"); 2099 return err; 2100 } 2101 2102 err = mlx4_slave_cap(dev); 2103 if (err) { 2104 mlx4_err(dev, "Failed to obtain slave caps\n"); 2105 goto err_close; 2106 } 2107 } 2108 2109 if (map_bf_area(dev)) 2110 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2111 2112 /* Only the master sets the ports; all the others get them from it. */ 2113 if (!mlx4_is_slave(dev)) 2114 mlx4_set_port_mask(dev); 2115 2116 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2117 if (err) { 2118 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2119 goto unmap_bf; 2120 } 2121 2122 /* Query CONFIG_DEV parameters */ 2123 err = mlx4_config_dev_retrieval(dev, &params); 2124 if (err && err != -ENOTSUPP) { 2125 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2126 } else if (!err) { 2127 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2128 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2129 } 2130 priv->eq_table.inta_pin = adapter.inta_pin; 2131 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2132 2133 return 0; 2134 2135 unmap_bf: 2136 unmap_internal_clock(dev); 2137 unmap_bf_area(dev); 2138 2139 if (mlx4_is_slave(dev)) { 2140 kfree(dev->caps.qp0_qkey); 2141 kfree(dev->caps.qp0_tunnel); 2142 kfree(dev->caps.qp0_proxy); 2143 kfree(dev->caps.qp1_tunnel); 2144 kfree(dev->caps.qp1_proxy); 2145 } 2146 2147 err_close: 2148 if (mlx4_is_slave(dev)) 2149 mlx4_slave_exit(dev); 2150 else 2151 mlx4_CLOSE_HCA(dev, 0); 2152 2153 err_free_icm: 2154 if (!mlx4_is_slave(dev)) 2155 mlx4_free_icms(dev); 2156 2157 return err; 2158 } 2159 2160 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2161 { 2162 struct mlx4_priv *priv = mlx4_priv(dev); 2163 int nent; 2164 2165 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2166 return -ENOENT; 2167 2168 nent = dev->caps.max_counters; 2169 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 2170 } 2171 2172 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2173 { 2174 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2175 } 2176 2177 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2178 { 2179 struct mlx4_priv *priv = mlx4_priv(dev); 2180 2181 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2182 return -ENOENT; 2183 2184 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2185 if (*idx == -1) 2186 return -ENOMEM; 2187 2188 return 0; 2189 } 2190 2191 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2192 { 2193 u64 out_param; 2194 int err; 2195 2196 if (mlx4_is_mfunc(dev)) { 2197 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2198 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2199 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2200 if (!err) 2201 *idx = get_param_l(&out_param); 2202 2203 return err; 2204 
} 2205 return __mlx4_counter_alloc(dev, idx); 2206 } 2207 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2208 2209 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2210 { 2211 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2212 return; 2213 } 2214 2215 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2216 { 2217 u64 in_param = 0; 2218 2219 if (mlx4_is_mfunc(dev)) { 2220 set_param_l(&in_param, idx); 2221 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2222 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2223 MLX4_CMD_WRAPPED); 2224 return; 2225 } 2226 __mlx4_counter_free(dev, idx); 2227 } 2228 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2229 2230 static int mlx4_setup_hca(struct mlx4_dev *dev) 2231 { 2232 struct mlx4_priv *priv = mlx4_priv(dev); 2233 int err; 2234 int port; 2235 __be32 ib_port_default_caps; 2236 2237 err = mlx4_init_uar_table(dev); 2238 if (err) { 2239 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2240 return err; 2241 } 2242 2243 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2244 if (err) { 2245 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2246 goto err_uar_table_free; 2247 } 2248 2249 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2250 if (!priv->kar) { 2251 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2252 err = -ENOMEM; 2253 goto err_uar_free; 2254 } 2255 2256 err = mlx4_init_pd_table(dev); 2257 if (err) { 2258 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2259 goto err_kar_unmap; 2260 } 2261 2262 err = mlx4_init_xrcd_table(dev); 2263 if (err) { 2264 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2265 goto err_pd_table_free; 2266 } 2267 2268 err = mlx4_init_mr_table(dev); 2269 if (err) { 2270 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2271 goto err_xrcd_table_free; 2272 } 2273 2274 if (!mlx4_is_slave(dev)) { 2275 err = mlx4_init_mcg_table(dev); 2276 if (err) { 2277 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2278 goto err_mr_table_free; 2279 } 2280 err = mlx4_config_mad_demux(dev); 2281 if (err) { 2282 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2283 goto err_mcg_table_free; 2284 } 2285 } 2286 2287 err = mlx4_init_eq_table(dev); 2288 if (err) { 2289 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2290 goto err_mcg_table_free; 2291 } 2292 2293 err = mlx4_cmd_use_events(dev); 2294 if (err) { 2295 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2296 goto err_eq_table_free; 2297 } 2298 2299 err = mlx4_NOP(dev); 2300 if (err) { 2301 if (dev->flags & MLX4_FLAG_MSI_X) { 2302 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2303 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2304 mlx4_warn(dev, "Trying again without MSI-X\n"); 2305 } else { 2306 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2307 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 2308 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2309 } 2310 2311 goto err_cmd_poll; 2312 } 2313 2314 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2315 2316 err = mlx4_init_cq_table(dev); 2317 if (err) { 2318 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2319 goto err_cmd_poll; 2320 } 2321 2322 err = mlx4_init_srq_table(dev); 2323 if (err) { 2324 mlx4_err(dev, "Failed to initialize shared receive queue table, 
aborting\n"); 2325 goto err_cq_table_free; 2326 } 2327 2328 err = mlx4_init_qp_table(dev); 2329 if (err) { 2330 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2331 goto err_srq_table_free; 2332 } 2333 2334 err = mlx4_init_counters_table(dev); 2335 if (err && err != -ENOENT) { 2336 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2337 goto err_qp_table_free; 2338 } 2339 2340 if (!mlx4_is_slave(dev)) { 2341 for (port = 1; port <= dev->caps.num_ports; port++) { 2342 ib_port_default_caps = 0; 2343 err = mlx4_get_port_ib_caps(dev, port, 2344 &ib_port_default_caps); 2345 if (err) 2346 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2347 port, err); 2348 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2349 2350 /* initialize per-slave default ib port capabilities */ 2351 if (mlx4_is_master(dev)) { 2352 int i; 2353 for (i = 0; i < dev->num_slaves; i++) { 2354 if (i == mlx4_master_func_num(dev)) 2355 continue; 2356 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2357 ib_port_default_caps; 2358 } 2359 } 2360 2361 if (mlx4_is_mfunc(dev)) 2362 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2363 else 2364 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2365 2366 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2367 dev->caps.pkey_table_len[port] : -1); 2368 if (err) { 2369 mlx4_err(dev, "Failed to set port %d, aborting\n", 2370 port); 2371 goto err_counters_table_free; 2372 } 2373 } 2374 } 2375 2376 return 0; 2377 2378 err_counters_table_free: 2379 mlx4_cleanup_counters_table(dev); 2380 2381 err_qp_table_free: 2382 mlx4_cleanup_qp_table(dev); 2383 2384 err_srq_table_free: 2385 mlx4_cleanup_srq_table(dev); 2386 2387 err_cq_table_free: 2388 mlx4_cleanup_cq_table(dev); 2389 2390 err_cmd_poll: 2391 mlx4_cmd_use_polling(dev); 2392 2393 err_eq_table_free: 2394 mlx4_cleanup_eq_table(dev); 2395 2396 err_mcg_table_free: 2397 if (!mlx4_is_slave(dev)) 2398 mlx4_cleanup_mcg_table(dev); 2399 2400 err_mr_table_free: 2401 mlx4_cleanup_mr_table(dev); 2402 2403 err_xrcd_table_free: 2404 mlx4_cleanup_xrcd_table(dev); 2405 2406 err_pd_table_free: 2407 mlx4_cleanup_pd_table(dev); 2408 2409 err_kar_unmap: 2410 iounmap(priv->kar); 2411 2412 err_uar_free: 2413 mlx4_uar_free(dev, &priv->driver_uar); 2414 2415 err_uar_table_free: 2416 mlx4_cleanup_uar_table(dev); 2417 return err; 2418 } 2419 2420 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2421 { 2422 struct mlx4_priv *priv = mlx4_priv(dev); 2423 struct msix_entry *entries; 2424 int i; 2425 2426 if (msi_x) { 2427 int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; 2428 2429 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2430 nreq); 2431 2432 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2433 if (!entries) 2434 goto no_msi; 2435 2436 for (i = 0; i < nreq; ++i) 2437 entries[i].entry = i; 2438 2439 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2440 nreq); 2441 2442 if (nreq < 0) { 2443 kfree(entries); 2444 goto no_msi; 2445 } else if (nreq < MSIX_LEGACY_SZ + 2446 dev->caps.num_ports * MIN_MSIX_P_PORT) { 2447 /*Working in legacy mode , all EQ's shared*/ 2448 dev->caps.comp_pool = 0; 2449 dev->caps.num_comp_vectors = nreq - 1; 2450 } else { 2451 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 2452 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 2453 } 2454 for (i = 0; i < nreq; ++i) 2455 priv->eq_table.eq[i].irq = entries[i].vector; 2456 2457 dev->flags |= MLX4_FLAG_MSI_X; 2458 2459 kfree(entries); 2460 return; 2461 } 2462 2463 
no_msi: 2464 dev->caps.num_comp_vectors = 1; 2465 dev->caps.comp_pool = 0; 2466 2467 for (i = 0; i < 2; ++i) 2468 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2469 } 2470 2471 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2472 { 2473 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2474 int err = 0; 2475 2476 info->dev = dev; 2477 info->port = port; 2478 if (!mlx4_is_slave(dev)) { 2479 mlx4_init_mac_table(dev, &info->mac_table); 2480 mlx4_init_vlan_table(dev, &info->vlan_table); 2481 mlx4_init_roce_gid_table(dev, &info->gid_table); 2482 info->base_qpn = mlx4_get_base_qpn(dev, port); 2483 } 2484 2485 sprintf(info->dev_name, "mlx4_port%d", port); 2486 info->port_attr.attr.name = info->dev_name; 2487 if (mlx4_is_mfunc(dev)) 2488 info->port_attr.attr.mode = S_IRUGO; 2489 else { 2490 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2491 info->port_attr.store = set_port_type; 2492 } 2493 info->port_attr.show = show_port_type; 2494 sysfs_attr_init(&info->port_attr.attr); 2495 2496 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2497 if (err) { 2498 mlx4_err(dev, "Failed to create file for port %d\n", port); 2499 info->port = -1; 2500 } 2501 2502 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2503 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2504 if (mlx4_is_mfunc(dev)) 2505 info->port_mtu_attr.attr.mode = S_IRUGO; 2506 else { 2507 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2508 info->port_mtu_attr.store = set_port_ib_mtu; 2509 } 2510 info->port_mtu_attr.show = show_port_ib_mtu; 2511 sysfs_attr_init(&info->port_mtu_attr.attr); 2512 2513 err = device_create_file(&dev->persist->pdev->dev, 2514 &info->port_mtu_attr); 2515 if (err) { 2516 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2517 device_remove_file(&info->dev->persist->pdev->dev, 2518 &info->port_attr); 2519 info->port = -1; 2520 } 2521 2522 return err; 2523 } 2524 2525 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2526 { 2527 if (info->port < 0) 2528 return; 2529 2530 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2531 device_remove_file(&info->dev->persist->pdev->dev, 2532 &info->port_mtu_attr); 2533 } 2534 2535 static int mlx4_init_steering(struct mlx4_dev *dev) 2536 { 2537 struct mlx4_priv *priv = mlx4_priv(dev); 2538 int num_entries = dev->caps.num_ports; 2539 int i, j; 2540 2541 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2542 if (!priv->steer) 2543 return -ENOMEM; 2544 2545 for (i = 0; i < num_entries; i++) 2546 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2547 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2548 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2549 } 2550 return 0; 2551 } 2552 2553 static void mlx4_clear_steering(struct mlx4_dev *dev) 2554 { 2555 struct mlx4_priv *priv = mlx4_priv(dev); 2556 struct mlx4_steer_index *entry, *tmp_entry; 2557 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2558 int num_entries = dev->caps.num_ports; 2559 int i, j; 2560 2561 for (i = 0; i < num_entries; i++) { 2562 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2563 list_for_each_entry_safe(pqp, tmp_pqp, 2564 &priv->steer[i].promisc_qps[j], 2565 list) { 2566 list_del(&pqp->list); 2567 kfree(pqp); 2568 } 2569 list_for_each_entry_safe(entry, tmp_entry, 2570 &priv->steer[i].steer_entries[j], 2571 list) { 2572 list_del(&entry->list); 2573 list_for_each_entry_safe(pqp, tmp_pqp, 2574 &entry->duplicates, 2575 list) { 2576 list_del(&pqp->list); 2577 kfree(pqp); 2578 } 2579 kfree(entry); 2580 } 2581 } 
2582 } 2583 kfree(priv->steer); 2584 } 2585 2586 static int extended_func_num(struct pci_dev *pdev) 2587 { 2588 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 2589 } 2590 2591 #define MLX4_OWNER_BASE 0x8069c 2592 #define MLX4_OWNER_SIZE 4 2593 2594 static int mlx4_get_ownership(struct mlx4_dev *dev) 2595 { 2596 void __iomem *owner; 2597 u32 ret; 2598 2599 if (pci_channel_offline(dev->persist->pdev)) 2600 return -EIO; 2601 2602 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2603 MLX4_OWNER_BASE, 2604 MLX4_OWNER_SIZE); 2605 if (!owner) { 2606 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2607 return -ENOMEM; 2608 } 2609 2610 ret = readl(owner); 2611 iounmap(owner); 2612 return (int) !!ret; 2613 } 2614 2615 static void mlx4_free_ownership(struct mlx4_dev *dev) 2616 { 2617 void __iomem *owner; 2618 2619 if (pci_channel_offline(dev->persist->pdev)) 2620 return; 2621 2622 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2623 MLX4_OWNER_BASE, 2624 MLX4_OWNER_SIZE); 2625 if (!owner) { 2626 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2627 return; 2628 } 2629 writel(0, owner); 2630 msleep(1000); 2631 iounmap(owner); 2632 } 2633 2634 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 2635 !!((flags) & MLX4_FLAG_MASTER)) 2636 2637 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2638 u8 total_vfs, int existing_vfs, int reset_flow) 2639 { 2640 u64 dev_flags = dev->flags; 2641 int err = 0; 2642 2643 if (reset_flow) { 2644 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 2645 GFP_KERNEL); 2646 if (!dev->dev_vfs) 2647 goto free_mem; 2648 return dev_flags; 2649 } 2650 2651 atomic_inc(&pf_loading); 2652 if (dev->flags & MLX4_FLAG_SRIOV) { 2653 if (existing_vfs != total_vfs) { 2654 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 2655 existing_vfs, total_vfs); 2656 total_vfs = existing_vfs; 2657 } 2658 } 2659 2660 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 2661 if (NULL == dev->dev_vfs) { 2662 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2663 goto disable_sriov; 2664 } 2665 2666 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 2667 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 2668 err = pci_enable_sriov(pdev, total_vfs); 2669 } 2670 if (err) { 2671 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 2672 err); 2673 goto disable_sriov; 2674 } else { 2675 mlx4_warn(dev, "Running in master mode\n"); 2676 dev_flags |= MLX4_FLAG_SRIOV | 2677 MLX4_FLAG_MASTER; 2678 dev_flags &= ~MLX4_FLAG_SLAVE; 2679 dev->persist->num_vfs = total_vfs; 2680 } 2681 return dev_flags; 2682 2683 disable_sriov: 2684 atomic_dec(&pf_loading); 2685 free_mem: 2686 dev->persist->num_vfs = 0; 2687 kfree(dev->dev_vfs); 2688 return dev_flags & ~MLX4_FLAG_MASTER; 2689 } 2690 2691 enum { 2692 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 2693 }; 2694 2695 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 2696 int *nvfs) 2697 { 2698 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 2699 /* Checking for 64 VFs as a limitation of CX2 */ 2700 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 2701 requested_vfs >= 64) { 2702 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 2703 requested_vfs); 2704 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 2705 } 2706 return 0; 2707 } 2708 2709 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2710 int total_vfs, int *nvfs, struct 
mlx4_priv *priv, 2711 int reset_flow) 2712 { 2713 struct mlx4_dev *dev; 2714 unsigned sum = 0; 2715 int err; 2716 int port; 2717 int i; 2718 struct mlx4_dev_cap *dev_cap = NULL; 2719 int existing_vfs = 0; 2720 2721 dev = &priv->dev; 2722 2723 INIT_LIST_HEAD(&priv->ctx_list); 2724 spin_lock_init(&priv->ctx_lock); 2725 2726 mutex_init(&priv->port_mutex); 2727 mutex_init(&priv->bond_mutex); 2728 2729 INIT_LIST_HEAD(&priv->pgdir_list); 2730 mutex_init(&priv->pgdir_mutex); 2731 2732 INIT_LIST_HEAD(&priv->bf_list); 2733 mutex_init(&priv->bf_mutex); 2734 2735 dev->rev_id = pdev->revision; 2736 dev->numa_node = dev_to_node(&pdev->dev); 2737 2738 /* Detect if this device is a virtual function */ 2739 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2740 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2741 dev->flags |= MLX4_FLAG_SLAVE; 2742 } else { 2743 /* We reset the device and enable SRIOV only for physical 2744 * devices. Try to claim ownership on the device; 2745 * if already taken, skip -- do not allow multiple PFs */ 2746 err = mlx4_get_ownership(dev); 2747 if (err) { 2748 if (err < 0) 2749 return err; 2750 else { 2751 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 2752 return -EINVAL; 2753 } 2754 } 2755 2756 atomic_set(&priv->opreq_count, 0); 2757 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 2758 2759 /* 2760 * Now reset the HCA before we touch the PCI capabilities or 2761 * attempt a firmware command, since a boot ROM may have left 2762 * the HCA in an undefined state. 2763 */ 2764 err = mlx4_reset(dev); 2765 if (err) { 2766 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 2767 goto err_sriov; 2768 } 2769 2770 if (total_vfs) { 2771 dev->flags = MLX4_FLAG_MASTER; 2772 existing_vfs = pci_num_vf(pdev); 2773 if (existing_vfs) 2774 dev->flags |= MLX4_FLAG_SRIOV; 2775 dev->persist->num_vfs = total_vfs; 2776 } 2777 } 2778 2779 /* on load remove any previous indication of internal error, 2780 * device is up. 2781 */ 2782 dev->persist->state = MLX4_DEVICE_STATE_UP; 2783 2784 slave_start: 2785 err = mlx4_cmd_init(dev); 2786 if (err) { 2787 mlx4_err(dev, "Failed to init command interface, aborting\n"); 2788 goto err_sriov; 2789 } 2790 2791 /* In slave functions, the communication channel must be initialized 2792 * before posting commands. 
Also, init num_slaves before calling 2793 * mlx4_init_hca */ 2794 if (mlx4_is_mfunc(dev)) { 2795 if (mlx4_is_master(dev)) { 2796 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 2797 2798 } else { 2799 dev->num_slaves = 0; 2800 err = mlx4_multi_func_init(dev); 2801 if (err) { 2802 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 2803 goto err_cmd; 2804 } 2805 } 2806 } 2807 2808 err = mlx4_init_fw(dev); 2809 if (err) { 2810 mlx4_err(dev, "Failed to init fw, aborting.\n"); 2811 goto err_mfunc; 2812 } 2813 2814 if (mlx4_is_master(dev)) { 2815 /* when we hit the goto slave_start below, dev_cap already initialized */ 2816 if (!dev_cap) { 2817 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 2818 2819 if (!dev_cap) { 2820 err = -ENOMEM; 2821 goto err_fw; 2822 } 2823 2824 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2825 if (err) { 2826 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2827 goto err_fw; 2828 } 2829 2830 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2831 goto err_fw; 2832 2833 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2834 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 2835 total_vfs, 2836 existing_vfs, 2837 reset_flow); 2838 2839 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2840 dev->flags = dev_flags; 2841 if (!SRIOV_VALID_STATE(dev->flags)) { 2842 mlx4_err(dev, "Invalid SRIOV state\n"); 2843 goto err_sriov; 2844 } 2845 err = mlx4_reset(dev); 2846 if (err) { 2847 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2848 goto err_sriov; 2849 } 2850 goto slave_start; 2851 } 2852 } else { 2853 /* Legacy mode FW requires SRIOV to be enabled before 2854 * doing QUERY_DEV_CAP, since max_eq's value is different if 2855 * SRIOV is enabled. 2856 */ 2857 memset(dev_cap, 0, sizeof(*dev_cap)); 2858 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2859 if (err) { 2860 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2861 goto err_fw; 2862 } 2863 2864 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2865 goto err_fw; 2866 } 2867 } 2868 2869 err = mlx4_init_hca(dev); 2870 if (err) { 2871 if (err == -EACCES) { 2872 /* Not primary Physical function 2873 * Running in slave mode */ 2874 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2875 /* We're not a PF */ 2876 if (dev->flags & MLX4_FLAG_SRIOV) { 2877 if (!existing_vfs) 2878 pci_disable_sriov(pdev); 2879 if (mlx4_is_master(dev) && !reset_flow) 2880 atomic_dec(&pf_loading); 2881 dev->flags &= ~MLX4_FLAG_SRIOV; 2882 } 2883 if (!mlx4_is_slave(dev)) 2884 mlx4_free_ownership(dev); 2885 dev->flags |= MLX4_FLAG_SLAVE; 2886 dev->flags &= ~MLX4_FLAG_MASTER; 2887 goto slave_start; 2888 } else 2889 goto err_fw; 2890 } 2891 2892 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2893 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 2894 existing_vfs, reset_flow); 2895 2896 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 2897 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 2898 dev->flags = dev_flags; 2899 err = mlx4_cmd_init(dev); 2900 if (err) { 2901 /* Only VHCR is cleaned up, so could still 2902 * send FW commands 2903 */ 2904 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 2905 goto err_close; 2906 } 2907 } else { 2908 dev->flags = dev_flags; 2909 } 2910 2911 if (!SRIOV_VALID_STATE(dev->flags)) { 2912 mlx4_err(dev, "Invalid SRIOV state\n"); 2913 goto err_close; 2914 } 2915 } 2916 2917 /* check if the device is functioning at its maximum possible speed. 
2918 * No return code for this call, just warn the user in case of PCI 2919 * express device capabilities are under-satisfied by the bus. 2920 */ 2921 if (!mlx4_is_slave(dev)) 2922 mlx4_check_pcie_caps(dev); 2923 2924 /* In master functions, the communication channel must be initialized 2925 * after obtaining its address from fw */ 2926 if (mlx4_is_master(dev)) { 2927 int ib_ports = 0; 2928 2929 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2930 ib_ports++; 2931 2932 if (ib_ports && 2933 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2934 mlx4_err(dev, 2935 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); 2936 err = -EINVAL; 2937 goto err_close; 2938 } 2939 if (dev->caps.num_ports < 2 && 2940 num_vfs_argc > 1) { 2941 err = -EINVAL; 2942 mlx4_err(dev, 2943 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 2944 dev->caps.num_ports); 2945 goto err_close; 2946 } 2947 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 2948 2949 for (i = 0; 2950 i < sizeof(dev->persist->nvfs)/ 2951 sizeof(dev->persist->nvfs[0]); i++) { 2952 unsigned j; 2953 2954 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 2955 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 2956 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 2957 dev->caps.num_ports; 2958 } 2959 } 2960 2961 /* In master functions, the communication channel 2962 * must be initialized after obtaining its address from fw 2963 */ 2964 err = mlx4_multi_func_init(dev); 2965 if (err) { 2966 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 2967 goto err_close; 2968 } 2969 } 2970 2971 err = mlx4_alloc_eq_table(dev); 2972 if (err) 2973 goto err_master_mfunc; 2974 2975 priv->msix_ctl.pool_bm = 0; 2976 mutex_init(&priv->msix_ctl.pool_lock); 2977 2978 mlx4_enable_msi_x(dev); 2979 if ((mlx4_is_mfunc(dev)) && 2980 !(dev->flags & MLX4_FLAG_MSI_X)) { 2981 err = -ENOSYS; 2982 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 2983 goto err_free_eq; 2984 } 2985 2986 if (!mlx4_is_slave(dev)) { 2987 err = mlx4_init_steering(dev); 2988 if (err) 2989 goto err_disable_msix; 2990 } 2991 2992 err = mlx4_setup_hca(dev); 2993 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 2994 !mlx4_is_mfunc(dev)) { 2995 dev->flags &= ~MLX4_FLAG_MSI_X; 2996 dev->caps.num_comp_vectors = 1; 2997 dev->caps.comp_pool = 0; 2998 pci_disable_msix(pdev); 2999 err = mlx4_setup_hca(dev); 3000 } 3001 3002 if (err) 3003 goto err_steer; 3004 3005 mlx4_init_quotas(dev); 3006 /* When PF resources are ready arm its comm channel to enable 3007 * getting commands 3008 */ 3009 if (mlx4_is_master(dev)) { 3010 err = mlx4_ARM_COMM_CHANNEL(dev); 3011 if (err) { 3012 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3013 err); 3014 goto err_steer; 3015 } 3016 } 3017 3018 for (port = 1; port <= dev->caps.num_ports; port++) { 3019 err = mlx4_init_port_info(dev, port); 3020 if (err) 3021 goto err_port; 3022 } 3023 3024 priv->v2p.port1 = 1; 3025 priv->v2p.port2 = 2; 3026 3027 err = mlx4_register_device(dev); 3028 if (err) 3029 goto err_port; 3030 3031 mlx4_request_modules(dev); 3032 3033 mlx4_sense_init(dev); 3034 mlx4_start_sense(dev); 3035 3036 priv->removed = 0; 3037 3038 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3039 atomic_dec(&pf_loading); 3040 3041 kfree(dev_cap); 3042 return 0; 3043 3044 err_port: 3045 for (--port; port >= 1; --port) 3046 mlx4_cleanup_port_info(&priv->port[port]); 3047 3048 
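/* Error unwind: release everything mlx4_setup_hca() created, in reverse
 * order of initialization.
 */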
mlx4_cleanup_counters_table(dev); 3049 mlx4_cleanup_qp_table(dev); 3050 mlx4_cleanup_srq_table(dev); 3051 mlx4_cleanup_cq_table(dev); 3052 mlx4_cmd_use_polling(dev); 3053 mlx4_cleanup_eq_table(dev); 3054 mlx4_cleanup_mcg_table(dev); 3055 mlx4_cleanup_mr_table(dev); 3056 mlx4_cleanup_xrcd_table(dev); 3057 mlx4_cleanup_pd_table(dev); 3058 mlx4_cleanup_uar_table(dev); 3059 3060 err_steer: 3061 if (!mlx4_is_slave(dev)) 3062 mlx4_clear_steering(dev); 3063 3064 err_disable_msix: 3065 if (dev->flags & MLX4_FLAG_MSI_X) 3066 pci_disable_msix(pdev); 3067 3068 err_free_eq: 3069 mlx4_free_eq_table(dev); 3070 3071 err_master_mfunc: 3072 if (mlx4_is_master(dev)) { 3073 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3074 mlx4_multi_func_cleanup(dev); 3075 } 3076 3077 if (mlx4_is_slave(dev)) { 3078 kfree(dev->caps.qp0_qkey); 3079 kfree(dev->caps.qp0_tunnel); 3080 kfree(dev->caps.qp0_proxy); 3081 kfree(dev->caps.qp1_tunnel); 3082 kfree(dev->caps.qp1_proxy); 3083 } 3084 3085 err_close: 3086 mlx4_close_hca(dev); 3087 3088 err_fw: 3089 mlx4_close_fw(dev); 3090 3091 err_mfunc: 3092 if (mlx4_is_slave(dev)) 3093 mlx4_multi_func_cleanup(dev); 3094 3095 err_cmd: 3096 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3097 3098 err_sriov: 3099 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3100 pci_disable_sriov(pdev); 3101 dev->flags &= ~MLX4_FLAG_SRIOV; 3102 } 3103 3104 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3105 atomic_dec(&pf_loading); 3106 3107 kfree(priv->dev.dev_vfs); 3108 3109 if (!mlx4_is_slave(dev)) 3110 mlx4_free_ownership(dev); 3111 3112 kfree(dev_cap); 3113 return err; 3114 } 3115 3116 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3117 struct mlx4_priv *priv) 3118 { 3119 int err; 3120 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3121 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3122 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3123 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3124 unsigned total_vfs = 0; 3125 unsigned int i; 3126 3127 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3128 3129 err = pci_enable_device(pdev); 3130 if (err) { 3131 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3132 return err; 3133 } 3134 3135 /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs 3136 * per port, we must limit the number of VFs to 63 (since there are 3137 * 128 MACs) 3138 */ 3139 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3140 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3141 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3142 if (nvfs[i] < 0) { 3143 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3144 err = -EINVAL; 3145 goto err_disable_pdev; 3146 } 3147 } 3148 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3149 i++) { 3150 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3151 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3152 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3153 err = -EINVAL; 3154 goto err_disable_pdev; 3155 } 3156 } 3157 if (total_vfs >= MLX4_MAX_NUM_VF) { 3158 dev_err(&pdev->dev, 3159 "Requested more VFs (%d) than allowed (%d)\n", 3160 total_vfs, MLX4_MAX_NUM_VF - 1); 3161 err = -EINVAL; 3162 goto err_disable_pdev; 3163 } 3164 3165 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3166 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { 3167 dev_err(&pdev->dev, 3168 "Requested more VFs (%d) for port (%d) than allowed (%d)\n", 3169 nvfs[i] + nvfs[2], i + 1, 
3170 MLX4_MAX_NUM_VF_P_PORT - 1); 3171 err = -EINVAL; 3172 goto err_disable_pdev; 3173 } 3174 } 3175 3176 /* Check for BARs. */ 3177 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3178 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3179 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3180 pci_dev_data, pci_resource_flags(pdev, 0)); 3181 err = -ENODEV; 3182 goto err_disable_pdev; 3183 } 3184 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3185 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3186 err = -ENODEV; 3187 goto err_disable_pdev; 3188 } 3189 3190 err = pci_request_regions(pdev, DRV_NAME); 3191 if (err) { 3192 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3193 goto err_disable_pdev; 3194 } 3195 3196 pci_set_master(pdev); 3197 3198 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3199 if (err) { 3200 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3201 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3202 if (err) { 3203 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3204 goto err_release_regions; 3205 } 3206 } 3207 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3208 if (err) { 3209 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3210 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3211 if (err) { 3212 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3213 goto err_release_regions; 3214 } 3215 } 3216 3217 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3218 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3219 /* Detect if this device is a virtual function */ 3220 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3221 /* When acting as pf, we normally skip vfs unless explicitly 3222 * requested to probe them. 
3223 */ 3224 if (total_vfs) { 3225 unsigned vfs_offset = 0; 3226 3227 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3228 vfs_offset + nvfs[i] < extended_func_num(pdev); 3229 vfs_offset += nvfs[i], i++) 3230 ; 3231 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3232 err = -ENODEV; 3233 goto err_release_regions; 3234 } 3235 if ((extended_func_num(pdev) - vfs_offset) 3236 > prb_vf[i]) { 3237 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3238 extended_func_num(pdev)); 3239 err = -ENODEV; 3240 goto err_release_regions; 3241 } 3242 } 3243 } 3244 3245 err = mlx4_catas_init(&priv->dev); 3246 if (err) 3247 goto err_release_regions; 3248 3249 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3250 if (err) 3251 goto err_catas; 3252 3253 return 0; 3254 3255 err_catas: 3256 mlx4_catas_end(&priv->dev); 3257 3258 err_release_regions: 3259 pci_release_regions(pdev); 3260 3261 err_disable_pdev: 3262 pci_disable_device(pdev); 3263 pci_set_drvdata(pdev, NULL); 3264 return err; 3265 } 3266 3267 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3268 { 3269 struct mlx4_priv *priv; 3270 struct mlx4_dev *dev; 3271 int ret; 3272 3273 printk_once(KERN_INFO "%s", mlx4_version); 3274 3275 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3276 if (!priv) 3277 return -ENOMEM; 3278 3279 dev = &priv->dev; 3280 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3281 if (!dev->persist) { 3282 kfree(priv); 3283 return -ENOMEM; 3284 } 3285 dev->persist->pdev = pdev; 3286 dev->persist->dev = dev; 3287 pci_set_drvdata(pdev, dev->persist); 3288 priv->pci_dev_data = id->driver_data; 3289 mutex_init(&dev->persist->device_state_mutex); 3290 mutex_init(&dev->persist->interface_state_mutex); 3291 3292 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3293 if (ret) { 3294 kfree(dev->persist); 3295 kfree(priv); 3296 } else { 3297 pci_save_state(pdev); 3298 } 3299 3300 return ret; 3301 } 3302 3303 static void mlx4_clean_dev(struct mlx4_dev *dev) 3304 { 3305 struct mlx4_dev_persistent *persist = dev->persist; 3306 struct mlx4_priv *priv = mlx4_priv(dev); 3307 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3308 3309 memset(priv, 0, sizeof(*priv)); 3310 priv->dev.persist = persist; 3311 priv->dev.flags = flags; 3312 } 3313 3314 static void mlx4_unload_one(struct pci_dev *pdev) 3315 { 3316 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3317 struct mlx4_dev *dev = persist->dev; 3318 struct mlx4_priv *priv = mlx4_priv(dev); 3319 int pci_dev_data; 3320 int p, i; 3321 3322 if (priv->removed) 3323 return; 3324 3325 /* saving current ports type for further use */ 3326 for (i = 0; i < dev->caps.num_ports; i++) { 3327 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3328 dev->persist->curr_port_poss_type[i] = dev->caps. 
possible_type[i + 1]; 3330 } 3331 3332 pci_dev_data = priv->pci_dev_data; 3333 3334 mlx4_stop_sense(dev); 3335 mlx4_unregister_device(dev); 3336 3337 for (p = 1; p <= dev->caps.num_ports; p++) { 3338 mlx4_cleanup_port_info(&priv->port[p]); 3339 mlx4_CLOSE_PORT(dev, p); 3340 } 3341 3342 if (mlx4_is_master(dev)) 3343 mlx4_free_resource_tracker(dev, 3344 RES_TR_FREE_SLAVES_ONLY); 3345 3346 mlx4_cleanup_counters_table(dev); 3347 mlx4_cleanup_qp_table(dev); 3348 mlx4_cleanup_srq_table(dev); 3349 mlx4_cleanup_cq_table(dev); 3350 mlx4_cmd_use_polling(dev); 3351 mlx4_cleanup_eq_table(dev); 3352 mlx4_cleanup_mcg_table(dev); 3353 mlx4_cleanup_mr_table(dev); 3354 mlx4_cleanup_xrcd_table(dev); 3355 mlx4_cleanup_pd_table(dev); 3356 3357 if (mlx4_is_master(dev)) 3358 mlx4_free_resource_tracker(dev, 3359 RES_TR_FREE_STRUCTS_ONLY); 3360 3361 iounmap(priv->kar); 3362 mlx4_uar_free(dev, &priv->driver_uar); 3363 mlx4_cleanup_uar_table(dev); 3364 if (!mlx4_is_slave(dev)) 3365 mlx4_clear_steering(dev); 3366 mlx4_free_eq_table(dev); 3367 if (mlx4_is_master(dev)) 3368 mlx4_multi_func_cleanup(dev); 3369 mlx4_close_hca(dev); 3370 mlx4_close_fw(dev); 3371 if (mlx4_is_slave(dev)) 3372 mlx4_multi_func_cleanup(dev); 3373 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3374 3375 if (dev->flags & MLX4_FLAG_MSI_X) 3376 pci_disable_msix(pdev); 3377 3378 if (!mlx4_is_slave(dev)) 3379 mlx4_free_ownership(dev); 3380 3381 kfree(dev->caps.qp0_qkey); 3382 kfree(dev->caps.qp0_tunnel); 3383 kfree(dev->caps.qp0_proxy); 3384 kfree(dev->caps.qp1_tunnel); 3385 kfree(dev->caps.qp1_proxy); 3386 kfree(dev->dev_vfs); 3387 3388 mlx4_clean_dev(dev); 3389 priv->pci_dev_data = pci_dev_data; 3390 priv->removed = 1; 3391 } 3392 3393 static void mlx4_remove_one(struct pci_dev *pdev) 3394 { 3395 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3396 struct mlx4_dev *dev = persist->dev; 3397 struct mlx4_priv *priv = mlx4_priv(dev); 3398 int active_vfs = 0; 3399 3400 mutex_lock(&persist->interface_state_mutex); 3401 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3402 mutex_unlock(&persist->interface_state_mutex); 3403 3404 /* Disabling SR-IOV is not allowed while there are active VFs */ 3405 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3406 active_vfs = mlx4_how_many_lives_vf(dev); 3407 if (active_vfs) { 3408 pr_warn("Removing PF while there are active VFs!\n"); 3409 pr_warn("Will not disable SR-IOV.\n"); 3410 } 3411 } 3412 3413 /* The device is marked for deletion; continue without the lock, 3414 * letting other tasks terminate 3415 */ 3416 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3417 mlx4_unload_one(pdev); 3418 else 3419 mlx4_info(dev, "%s: interface is down\n", __func__); 3420 mlx4_catas_end(dev); 3421 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 3422 mlx4_warn(dev, "Disabling SR-IOV\n"); 3423 pci_disable_sriov(pdev); 3424 } 3425 3426 pci_release_regions(pdev); 3427 pci_disable_device(pdev); 3428 kfree(dev->persist); 3429 kfree(priv); 3430 pci_set_drvdata(pdev, NULL); 3431 } 3432 3433 static int restore_current_port_types(struct mlx4_dev *dev, 3434 enum mlx4_port_type *types, 3435 enum mlx4_port_type *poss_types) 3436 { 3437 struct mlx4_priv *priv = mlx4_priv(dev); 3438 int err, i; 3439 3440 mlx4_stop_sense(dev); 3441 3442 mutex_lock(&priv->port_mutex); 3443 for (i = 0; i < dev->caps.num_ports; i++) 3444 dev->caps.possible_type[i + 1] = poss_types[i]; 3445 err = mlx4_change_port_types(dev, types); 3446 mlx4_start_sense(dev); 3447 mutex_unlock(&priv->port_mutex); 3448 3449 
return err; 3450 } 3451 3452 int mlx4_restart_one(struct pci_dev *pdev) 3453 { 3454 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3455 struct mlx4_dev *dev = persist->dev; 3456 struct mlx4_priv *priv = mlx4_priv(dev); 3457 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3458 int pci_dev_data, err, total_vfs; 3459 3460 pci_dev_data = priv->pci_dev_data; 3461 total_vfs = dev->persist->num_vfs; 3462 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3463 3464 mlx4_unload_one(pdev); 3465 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); 3466 if (err) { 3467 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3468 __func__, pci_name(pdev), err); 3469 return err; 3470 } 3471 3472 err = restore_current_port_types(dev, dev->persist->curr_port_type, 3473 dev->persist->curr_port_poss_type); 3474 if (err) 3475 mlx4_err(dev, "could not restore original port types (%d)\n", 3476 err); 3477 3478 return err; 3479 } 3480 3481 static const struct pci_device_id mlx4_pci_table[] = { 3482 /* MT25408 "Hermon" SDR */ 3483 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3484 /* MT25408 "Hermon" DDR */ 3485 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3486 /* MT25408 "Hermon" QDR */ 3487 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3488 /* MT25408 "Hermon" DDR PCIe gen2 */ 3489 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3490 /* MT25408 "Hermon" QDR PCIe gen2 */ 3491 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3492 /* MT25408 "Hermon" EN 10GigE */ 3493 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3494 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 3495 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3496 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 3497 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3498 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 3499 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3500 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 3501 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3502 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 3503 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3504 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 3505 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3506 /* MT25400 Family [ConnectX-2 Virtual Function] */ 3507 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, 3508 /* MT27500 Family [ConnectX-3] */ 3509 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 3510 /* MT27500 Family [ConnectX-3 Virtual Function] */ 3511 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, 3512 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 3513 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 3514 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 3515 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 3516 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 3517 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 3518 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 3519 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 3520 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 3521 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 3522 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 3523 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 3524 { 0, } 3525 }; 3526 3527 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 
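/*
 * PCI error recovery (AER) flow: on a channel failure the PCI core first
 * calls .error_detected, which moves the device into the internal error
 * state and unloads it if it was up; after the slot has been reset it
 * calls .slot_reset, which re-enables the device and reloads it with the
 * num_vfs and port-type configuration saved in the persistent state.
 */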
3528 3529 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3530 pci_channel_state_t state) 3531 { 3532 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3533 3534 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 3535 mlx4_enter_error_state(persist); 3536 3537 mutex_lock(&persist->interface_state_mutex); 3538 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3539 mlx4_unload_one(pdev); 3540 3541 mutex_unlock(&persist->interface_state_mutex); 3542 if (state == pci_channel_io_perm_failure) 3543 return PCI_ERS_RESULT_DISCONNECT; 3544 3545 pci_disable_device(pdev); 3546 return PCI_ERS_RESULT_NEED_RESET; 3547 } 3548 3549 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3550 { 3551 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3552 struct mlx4_dev *dev = persist->dev; 3553 struct mlx4_priv *priv = mlx4_priv(dev); 3554 int ret; 3555 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3556 int total_vfs; 3557 3558 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 3559 ret = pci_enable_device(pdev); 3560 if (ret) { 3561 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 3562 return PCI_ERS_RESULT_DISCONNECT; 3563 } 3564 3565 pci_set_master(pdev); 3566 pci_restore_state(pdev); 3567 pci_save_state(pdev); 3568 3569 total_vfs = dev->persist->num_vfs; 3570 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3571 3572 mutex_lock(&persist->interface_state_mutex); 3573 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 3574 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 3575 priv, 1); 3576 if (ret) { 3577 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 3578 __func__, ret); 3579 goto end; 3580 } 3581 3582 ret = restore_current_port_types(dev, dev->persist-> 3583 curr_port_type, dev->persist-> 3584 curr_port_poss_type); 3585 if (ret) 3586 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 3587 } 3588 end: 3589 mutex_unlock(&persist->interface_state_mutex); 3590 3591 return ret ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3592 } 3593 3594 static void mlx4_shutdown(struct pci_dev *pdev) 3595 { 3596 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3597 3598 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 3599 mutex_lock(&persist->interface_state_mutex); 3600 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3601 mlx4_unload_one(pdev); 3602 mutex_unlock(&persist->interface_state_mutex); 3603 } 3604 3605 static const struct pci_error_handlers mlx4_err_handler = { 3606 .error_detected = mlx4_pci_err_detected, 3607 .slot_reset = mlx4_pci_slot_reset, 3608 }; 3609 3610 static struct pci_driver mlx4_driver = { 3611 .name = DRV_NAME, 3612 .id_table = mlx4_pci_table, 3613 .probe = mlx4_init_one, 3614 .shutdown = mlx4_shutdown, 3615 .remove = mlx4_remove_one, 3616 .err_handler = &mlx4_err_handler, 3617 }; 3618 3619 static int __init mlx4_verify_params(void) 3620 { 3621 if ((log_num_mac < 0) || (log_num_mac > 7)) { 3622 pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac); 3623 return -1; 3624 } 3625 3626 if (log_num_vlan != 0) 3627 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 3628 MLX4_LOG_NUM_VLANS); 3629 3630 if (use_prio != 0) 3631 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 3632 3633 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 3634 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 3635 log_mtts_per_seg); 3636 return -1; 3637 } 3638 3639 /* Check that the port type module parameters form a legal combination */ 3640 if (port_type_array[0] == false && port_type_array[1] == true) { 3641 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 3642 port_type_array[0] = true; 3643 } 3644 3645 if (mlx4_log_num_mgm_entry_size < -7 || 3646 (mlx4_log_num_mgm_entry_size > 0 && 3647 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 3648 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 3649 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 3650 mlx4_log_num_mgm_entry_size, 3651 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 3652 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 3653 return -1; 3654 } 3655 3656 return 0; 3657 } 3658 3659 static int __init mlx4_init(void) 3660 { 3661 int ret; 3662 3663 if (mlx4_verify_params()) 3664 return -EINVAL; 3665 3666 3667 mlx4_wq = create_singlethread_workqueue("mlx4"); 3668 if (!mlx4_wq) 3669 return -ENOMEM; 3670 3671 ret = pci_register_driver(&mlx4_driver); 3672 if (ret < 0) 3673 destroy_workqueue(mlx4_wq); 3674 return ret < 0 ? ret : 0; 3675 } 3676 3677 static void __exit mlx4_cleanup(void) 3678 { 3679 pci_unregister_driver(&mlx4_driver); 3680 destroy_workqueue(mlx4_wq); 3681 } 3682 3683 module_init(mlx4_init); 3684 module_exit(mlx4_cleanup); 3685
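/*
 * Usage sketch (hypothetical values, not part of this source): on a
 * dual-port PF, the module parameters handled above can be combined as,
 * for example,
 *
 *   modprobe mlx4_core num_vfs=4,4,2 probe_vf=1,1,0 msi_x=1
 *
 * which requests four single-port VFs on each port plus two dual-port VFs
 * (nvfs[0]/nvfs[1]/nvfs[2] after the param_map remapping in
 * __mlx4_init_one()), asks the PF to probe one VF per port itself, and
 * leaves MSI-X enabled.
 */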