1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 #include <linux/kmod.h> 45 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/doorbell.h> 48 49 #include "mlx4.h" 50 #include "fw.h" 51 #include "icm.h" 52 53 MODULE_AUTHOR("Roland Dreier"); 54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 55 MODULE_LICENSE("Dual BSD/GPL"); 56 MODULE_VERSION(DRV_VERSION); 57 58 struct workqueue_struct *mlx4_wq; 59 60 #ifdef CONFIG_MLX4_DEBUG 61 62 int mlx4_debug_level = 0; 63 module_param_named(debug_level, mlx4_debug_level, int, 0644); 64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 65 66 #endif /* CONFIG_MLX4_DEBUG */ 67 68 #ifdef CONFIG_PCI_MSI 69 70 static int msi_x = 1; 71 module_param(msi_x, int, 0444); 72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 73 74 #else /* CONFIG_PCI_MSI */ 75 76 #define msi_x (0) 77 78 #endif /* CONFIG_PCI_MSI */ 79 80 static uint8_t num_vfs[3] = {0, 0, 0}; 81 static int num_vfs_argc; 82 module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 84 "num_vfs=port1,port2,port1+2"); 85 86 static uint8_t probe_vf[3] = {0, 0, 0}; 87 static int probe_vfs_argc; 88 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 89 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 90 "probe_vf=port1,port2,port1+2"); 91 92 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 93 module_param_named(log_num_mgm_entry_size, 94 mlx4_log_num_mgm_entry_size, int, 0444); 95 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 96 " of qp per mcg, for example:" 97 " 10 gives 248.range: 7 <=" 98 " log_num_mgm_entry_size <= 12." 99 " To activate device managed" 100 " flow steering when available, set to -1"); 101 102 static bool enable_64b_cqe_eqe = true; 103 module_param(enable_64b_cqe_eqe, bool, 0444); 104 MODULE_PARM_DESC(enable_64b_cqe_eqe, 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 106 107 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 109 MLX4_FUNC_CAP_DMFS_A0_STATIC) 110 111 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) 112 113 static char mlx4_version[] = 114 DRV_NAME ": Mellanox ConnectX core driver v" 115 DRV_VERSION " (" DRV_RELDATE ")\n"; 116 117 static struct mlx4_profile default_profile = { 118 .num_qp = 1 << 18, 119 .num_srq = 1 << 16, 120 .rdmarc_per_qp = 1 << 4, 121 .num_cq = 1 << 16, 122 .num_mcg = 1 << 13, 123 .num_mpt = 1 << 19, 124 .num_mtt = 1 << 20, /* It is really num mtt segements */ 125 }; 126 127 static struct mlx4_profile low_mem_profile = { 128 .num_qp = 1 << 17, 129 .num_srq = 1 << 6, 130 .rdmarc_per_qp = 1 << 4, 131 .num_cq = 1 << 8, 132 .num_mcg = 1 << 8, 133 .num_mpt = 1 << 9, 134 .num_mtt = 1 << 7, 135 }; 136 137 static int log_num_mac = 7; 138 module_param_named(log_num_mac, log_num_mac, int, 0444); 139 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 140 141 static int log_num_vlan; 142 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 143 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 144 /* Log2 max number of VLANs per ETH port (0-7) */ 145 #define MLX4_LOG_NUM_VLANS 7 146 #define MLX4_MIN_LOG_NUM_VLANS 0 147 #define MLX4_MIN_LOG_NUM_MAC 1 148 149 static bool use_prio; 150 module_param_named(use_prio, use_prio, bool, 0444); 151 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); 152 153 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 154 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 155 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 156 157 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 158 static int arr_argc = 2; 159 module_param_array(port_type_array, int, &arr_argc, 0444); 160 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 161 "1 for IB, 2 for Ethernet"); 162 163 struct mlx4_port_config { 164 struct list_head list; 165 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 166 struct pci_dev *pdev; 167 }; 168 169 static atomic_t pf_loading = ATOMIC_INIT(0); 170 171 int mlx4_check_port_params(struct mlx4_dev *dev, 172 enum mlx4_port_type *port_type) 173 { 174 int i; 175 176 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 177 for (i = 0; i < dev->caps.num_ports - 1; i++) { 178 if (port_type[i] != port_type[i + 1]) { 179 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 180 return -EINVAL; 181 } 182 } 183 } 184 185 for (i = 0; i < dev->caps.num_ports; i++) { 186 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 187 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 188 i + 1); 189 return -EINVAL; 190 } 191 } 192 return 0; 193 } 194 195 static void mlx4_set_port_mask(struct mlx4_dev *dev) 196 { 197 int i; 198 199 for (i = 1; i <= dev->caps.num_ports; ++i) 200 dev->caps.port_mask[i] = dev->caps.port_type[i]; 201 } 202 203 enum { 204 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 205 }; 206 207 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 208 { 209 int err = 0; 210 struct mlx4_func func; 211 212 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 213 err = mlx4_QUERY_FUNC(dev, &func, 0); 214 if (err) { 215 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 216 return err; 217 } 218 dev_cap->max_eqs = func.max_eq; 219 dev_cap->reserved_eqs = func.rsvd_eqs; 220 dev_cap->reserved_uars = func.rsvd_uars; 221 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 222 } 223 return err; 224 } 225 226 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 227 { 228 struct mlx4_caps *dev_cap = &dev->caps; 229 230 /* FW not supporting or cancelled by user */ 231 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || 232 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) 233 return; 234 235 /* Must have 64B CQE_EQE enabled by FW to use bigger stride 236 * When FW has NCSI it may decide not to report 64B CQE/EQEs 237 */ 238 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || 239 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { 240 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 241 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 242 return; 243 } 244 245 if (cache_line_size() == 128 || cache_line_size() == 256) { 246 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); 247 /* Changing the real data inside CQE size to 32B */ 248 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 249 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 250 251 if (mlx4_is_master(dev)) 252 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 253 } else { 254 if (cache_line_size() != 32 && cache_line_size() != 64) 255 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); 256 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 257 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 258 } 259 } 260 261 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, 262 struct mlx4_port_cap *port_cap) 263 { 264 dev->caps.vl_cap[port] = port_cap->max_vl; 265 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; 266 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; 267 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; 268 /* set gid and pkey table operating lengths by default 269 * to non-sriov values 270 */ 271 dev->caps.gid_table_len[port] = port_cap->max_gids; 272 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 273 dev->caps.port_width_cap[port] = port_cap->max_port_width; 274 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 275 dev->caps.def_mac[port] = port_cap->def_mac; 276 dev->caps.supported_type[port] = port_cap->supported_port_types; 277 dev->caps.suggested_type[port] = port_cap->suggested_type; 278 dev->caps.default_sense[port] = port_cap->default_sense; 279 dev->caps.trans_type[port] = port_cap->trans_type; 280 dev->caps.vendor_oui[port] = port_cap->vendor_oui; 281 dev->caps.wavelength[port] = port_cap->wavelength; 282 dev->caps.trans_code[port] = port_cap->trans_code; 283 284 return 0; 285 } 286 287 static int mlx4_dev_port(struct mlx4_dev *dev, int port, 288 struct mlx4_port_cap *port_cap) 289 { 290 int err = 0; 291 292 err = mlx4_QUERY_PORT(dev, port, port_cap); 293 294 if (err) 295 mlx4_err(dev, "QUERY_PORT command failed.\n"); 296 297 return err; 298 } 299 300 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) 301 { 302 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) 303 return; 304 305 if (mlx4_is_mfunc(dev)) { 306 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS"); 307 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 308 return; 309 } 310 311 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { 312 mlx4_dbg(dev, 313 "Keep FCS is not supported - Disabling Ignore FCS"); 314 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 315 return; 316 } 317 } 318 319 #define MLX4_A0_STEERING_TABLE_SIZE 256 320 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 321 { 322 int err; 323 int i; 324 325 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 326 if (err) { 327 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 328 return err; 329 } 330 mlx4_dev_cap_dump(dev, dev_cap); 331 332 if (dev_cap->min_page_sz > PAGE_SIZE) { 333 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 334 dev_cap->min_page_sz, PAGE_SIZE); 335 return -ENODEV; 336 } 337 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 338 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 339 dev_cap->num_ports, MLX4_MAX_PORTS); 340 return -ENODEV; 341 } 342 343 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { 344 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 345 dev_cap->uar_size, 346 (unsigned long long) 347 pci_resource_len(dev->persist->pdev, 2)); 348 return -ENODEV; 349 } 350 351 dev->caps.num_ports = dev_cap->num_ports; 352 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 353 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 354 dev->caps.num_sys_eqs : 355 MLX4_MAX_EQ_NUM; 356 for (i = 1; i <= dev->caps.num_ports; ++i) { 357 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); 358 if (err) { 359 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); 360 return err; 361 } 362 } 363 364 dev->caps.uar_page_size = PAGE_SIZE; 365 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 366 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 367 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 368 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 369 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 370 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 371 dev->caps.max_wqes = dev_cap->max_qp_sz; 372 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 373 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 374 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 375 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 376 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 377 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 378 /* 379 * Subtract 1 from the limit because we need to allocate a 380 * spare CQE so the HCA HW can tell the difference between an 381 * empty CQ and a full CQ. 382 */ 383 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 384 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 385 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 386 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 387 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 388 389 /* The first 128 UARs are used for EQ doorbells */ 390 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 391 dev->caps.reserved_pds = dev_cap->reserved_pds; 392 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 393 dev_cap->reserved_xrcds : 0; 394 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 395 dev_cap->max_xrcds : 0; 396 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 397 398 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 399 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 400 dev->caps.flags = dev_cap->flags; 401 dev->caps.flags2 = dev_cap->flags2; 402 dev->caps.bmme_flags = dev_cap->bmme_flags; 403 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 404 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 405 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 406 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 407 408 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 409 struct mlx4_init_hca_param hca_param; 410 411 memset(&hca_param, 0, sizeof(hca_param)); 412 err = mlx4_QUERY_HCA(dev, &hca_param); 413 /* Turn off PHV_EN flag in case phv_check_en is set. 414 * phv_check_en is a HW check that parse the packet and verify 415 * phv bit was reported correctly in the wqe. To allow QinQ 416 * PHV_EN flag should be set and phv_check_en must be cleared 417 * otherwise QinQ packets will be drop by the HW. 418 */ 419 if (err || hca_param.phv_check_en) 420 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; 421 } 422 423 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 424 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 425 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 426 /* Don't do sense port on multifunction devices (for now at least) */ 427 if (mlx4_is_mfunc(dev)) 428 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 429 430 if (mlx4_low_memory_profile()) { 431 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 432 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 433 } else { 434 dev->caps.log_num_macs = log_num_mac; 435 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 436 } 437 438 for (i = 1; i <= dev->caps.num_ports; ++i) { 439 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 440 if (dev->caps.supported_type[i]) { 441 /* if only ETH is supported - assign ETH */ 442 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 443 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 444 /* if only IB is supported, assign IB */ 445 else if (dev->caps.supported_type[i] == 446 MLX4_PORT_TYPE_IB) 447 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 448 else { 449 /* if IB and ETH are supported, we set the port 450 * type according to user selection of port type; 451 * if user selected none, take the FW hint */ 452 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 453 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 454 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 455 else 456 dev->caps.port_type[i] = port_type_array[i - 1]; 457 } 458 } 459 /* 460 * Link sensing is allowed on the port if 3 conditions are true: 461 * 1. Both protocols are supported on the port. 462 * 2. Different types are supported on the port 463 * 3. FW declared that it supports link sensing 464 */ 465 mlx4_priv(dev)->sense.sense_allowed[i] = 466 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 467 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 468 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 469 470 /* 471 * If "default_sense" bit is set, we move the port to "AUTO" mode 472 * and perform sense_port FW command to try and set the correct 473 * port type from beginning 474 */ 475 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 476 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 477 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 478 mlx4_SENSE_PORT(dev, i, &sensed_port); 479 if (sensed_port != MLX4_PORT_TYPE_NONE) 480 dev->caps.port_type[i] = sensed_port; 481 } else { 482 dev->caps.possible_type[i] = dev->caps.port_type[i]; 483 } 484 485 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 486 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 487 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 488 i, 1 << dev->caps.log_num_macs); 489 } 490 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 491 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 492 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 493 i, 1 << dev->caps.log_num_vlans); 494 } 495 } 496 497 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) && 498 (port_type_array[0] == MLX4_PORT_TYPE_IB) && 499 (port_type_array[1] == MLX4_PORT_TYPE_ETH)) { 500 mlx4_warn(dev, 501 "Granular QoS per VF not supported with IB/Eth configuration\n"); 502 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP; 503 } 504 505 dev->caps.max_counters = dev_cap->max_counters; 506 507 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 508 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 509 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 510 (1 << dev->caps.log_num_macs) * 511 (1 << dev->caps.log_num_vlans) * 512 dev->caps.num_ports; 513 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 514 515 if (dev_cap->dmfs_high_rate_qpn_base > 0 && 516 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) 517 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; 518 else 519 dev->caps.dmfs_high_rate_qpn_base = 520 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 521 522 if (dev_cap->dmfs_high_rate_qpn_range > 0 && 523 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 524 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; 525 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; 526 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; 527 } else { 528 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; 529 dev->caps.dmfs_high_rate_qpn_base = 530 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 531 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; 532 } 533 534 dev->caps.rl_caps = dev_cap->rl_caps; 535 536 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = 537 dev->caps.dmfs_high_rate_qpn_range; 538 539 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 540 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 541 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 542 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 543 544 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; 545 546 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { 547 if (dev_cap->flags & 548 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 549 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 550 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 551 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 552 } 553 554 if (dev_cap->flags2 & 555 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | 556 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { 557 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); 558 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 559 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 560 } 561 } 562 563 if ((dev->caps.flags & 564 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 565 mlx4_is_master(dev)) 566 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 567 568 if (!mlx4_is_slave(dev)) { 569 mlx4_enable_cqe_eqe_stride(dev); 570 dev->caps.alloc_res_qp_mask = 571 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 572 MLX4_RESERVE_A0_QP; 573 574 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && 575 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { 576 mlx4_warn(dev, "Old device ETS support detected\n"); 577 mlx4_warn(dev, "Consider upgrading device FW.\n"); 578 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; 579 } 580 581 } else { 582 dev->caps.alloc_res_qp_mask = 0; 583 } 584 585 mlx4_enable_ignore_fcs(dev); 586 587 return 0; 588 } 589 590 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, 591 enum pci_bus_speed *speed, 592 enum pcie_link_width *width) 593 { 594 u32 lnkcap1, lnkcap2; 595 int err1, err2; 596 597 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 598 599 *speed = PCI_SPEED_UNKNOWN; 600 *width = PCIE_LNK_WIDTH_UNKNOWN; 601 602 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, 603 &lnkcap1); 604 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, 605 &lnkcap2); 606 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 607 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 608 *speed = PCIE_SPEED_8_0GT; 609 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 610 *speed = PCIE_SPEED_5_0GT; 611 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 612 *speed = PCIE_SPEED_2_5GT; 613 } 614 if (!err1) { 615 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 616 if (!lnkcap2) { /* pre-r3.0 */ 617 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 618 *speed = PCIE_SPEED_5_0GT; 619 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 620 *speed = PCIE_SPEED_2_5GT; 621 } 622 } 623 624 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { 625 return err1 ? err1 : 626 err2 ? err2 : -EINVAL; 627 } 628 return 0; 629 } 630 631 static void mlx4_check_pcie_caps(struct mlx4_dev *dev) 632 { 633 enum pcie_link_width width, width_cap; 634 enum pci_bus_speed speed, speed_cap; 635 int err; 636 637 #define PCIE_SPEED_STR(speed) \ 638 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 639 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 640 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ 641 "Unknown") 642 643 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 644 if (err) { 645 mlx4_warn(dev, 646 "Unable to determine PCIe device BW capabilities\n"); 647 return; 648 } 649 650 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 651 if (err || speed == PCI_SPEED_UNKNOWN || 652 width == PCIE_LNK_WIDTH_UNKNOWN) { 653 mlx4_warn(dev, 654 "Unable to determine PCI device chain minimum BW\n"); 655 return; 656 } 657 658 if (width != width_cap || speed != speed_cap) 659 mlx4_warn(dev, 660 "PCIe BW is different than device's capability\n"); 661 662 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 663 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 664 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 665 width, width_cap); 666 return; 667 } 668 669 /*The function checks if there are live vf, return the num of them*/ 670 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 671 { 672 struct mlx4_priv *priv = mlx4_priv(dev); 673 struct mlx4_slave_state *s_state; 674 int i; 675 int ret = 0; 676 677 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 678 s_state = &priv->mfunc.master.slave_state[i]; 679 if (s_state->active && s_state->last_cmd != 680 MLX4_COMM_CMD_RESET) { 681 mlx4_warn(dev, "%s: slave: %d is still active\n", 682 __func__, i); 683 ret++; 684 } 685 } 686 return ret; 687 } 688 689 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 690 { 691 u32 qk = MLX4_RESERVED_QKEY_BASE; 692 693 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 694 qpn < dev->phys_caps.base_proxy_sqpn) 695 return -EINVAL; 696 697 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 698 /* tunnel qp */ 699 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 700 else 701 qk += qpn - dev->phys_caps.base_proxy_sqpn; 702 *qkey = qk; 703 return 0; 704 } 705 EXPORT_SYMBOL(mlx4_get_parav_qkey); 706 707 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 708 { 709 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 710 711 if (!mlx4_is_master(dev)) 712 return; 713 714 priv->virt2phys_pkey[slave][port - 1][i] = val; 715 } 716 EXPORT_SYMBOL(mlx4_sync_pkey_table); 717 718 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 719 { 720 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 721 722 if (!mlx4_is_master(dev)) 723 return; 724 725 priv->slave_node_guids[slave] = guid; 726 } 727 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 728 729 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 730 { 731 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 732 733 if (!mlx4_is_master(dev)) 734 return 0; 735 736 return priv->slave_node_guids[slave]; 737 } 738 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 739 740 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 741 { 742 struct mlx4_priv *priv = mlx4_priv(dev); 743 struct mlx4_slave_state *s_slave; 744 745 if (!mlx4_is_master(dev)) 746 return 0; 747 748 s_slave = &priv->mfunc.master.slave_state[slave]; 749 return !!s_slave->active; 750 } 751 EXPORT_SYMBOL(mlx4_is_slave_active); 752 753 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 754 struct mlx4_dev_cap *dev_cap, 755 struct mlx4_init_hca_param *hca_param) 756 { 757 dev->caps.steering_mode = hca_param->steering_mode; 758 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 759 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 760 dev->caps.fs_log_max_ucast_qp_range_size = 761 dev_cap->fs_log_max_ucast_qp_range_size; 762 } else 763 dev->caps.num_qp_per_mgm = 764 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 765 766 mlx4_dbg(dev, "Steering mode is: %s\n", 767 mlx4_steering_mode_str(dev->caps.steering_mode)); 768 } 769 770 static int mlx4_slave_cap(struct mlx4_dev *dev) 771 { 772 int err; 773 u32 page_size; 774 struct mlx4_dev_cap dev_cap; 775 struct mlx4_func_cap func_cap; 776 struct mlx4_init_hca_param hca_param; 777 u8 i; 778 779 memset(&hca_param, 0, sizeof(hca_param)); 780 err = mlx4_QUERY_HCA(dev, &hca_param); 781 if (err) { 782 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); 783 return err; 784 } 785 786 /* fail if the hca has an unknown global capability 787 * at this time global_caps should be always zeroed 788 */ 789 if (hca_param.global_caps) { 790 mlx4_err(dev, "Unknown hca global capabilities\n"); 791 return -ENOSYS; 792 } 793 794 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 795 796 dev->caps.hca_core_clock = hca_param.hca_core_clock; 797 798 memset(&dev_cap, 0, sizeof(dev_cap)); 799 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 800 err = mlx4_dev_cap(dev, &dev_cap); 801 if (err) { 802 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 803 return err; 804 } 805 806 err = mlx4_QUERY_FW(dev); 807 if (err) 808 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); 809 810 page_size = ~dev->caps.page_size_cap + 1; 811 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 812 if (page_size > PAGE_SIZE) { 813 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 814 page_size, PAGE_SIZE); 815 return -ENODEV; 816 } 817 818 /* slave gets uar page size from QUERY_HCA fw command */ 819 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 820 821 /* TODO: relax this assumption */ 822 if (dev->caps.uar_page_size != PAGE_SIZE) { 823 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 824 dev->caps.uar_page_size, PAGE_SIZE); 825 return -ENODEV; 826 } 827 828 memset(&func_cap, 0, sizeof(func_cap)); 829 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 830 if (err) { 831 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", 832 err); 833 return err; 834 } 835 836 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 837 PF_CONTEXT_BEHAVIOUR_MASK) { 838 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", 839 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); 840 return -ENOSYS; 841 } 842 843 dev->caps.num_ports = func_cap.num_ports; 844 dev->quotas.qp = func_cap.qp_quota; 845 dev->quotas.srq = func_cap.srq_quota; 846 dev->quotas.cq = func_cap.cq_quota; 847 dev->quotas.mpt = func_cap.mpt_quota; 848 dev->quotas.mtt = func_cap.mtt_quota; 849 dev->caps.num_qps = 1 << hca_param.log_num_qps; 850 dev->caps.num_srqs = 1 << hca_param.log_num_srqs; 851 dev->caps.num_cqs = 1 << hca_param.log_num_cqs; 852 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 853 dev->caps.num_eqs = func_cap.max_eq; 854 dev->caps.reserved_eqs = func_cap.reserved_eq; 855 dev->caps.reserved_lkey = func_cap.reserved_lkey; 856 dev->caps.num_pds = MLX4_NUM_PDS; 857 dev->caps.num_mgms = 0; 858 dev->caps.num_amgms = 0; 859 860 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 861 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 862 dev->caps.num_ports, MLX4_MAX_PORTS); 863 return -ENODEV; 864 } 865 866 mlx4_replace_zero_macs(dev); 867 868 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); 869 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 870 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 871 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 872 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 873 874 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 875 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || 876 !dev->caps.qp0_qkey) { 877 err = -ENOMEM; 878 goto err_mem; 879 } 880 881 for (i = 1; i <= dev->caps.num_ports; ++i) { 882 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 883 if (err) { 884 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 885 i, err); 886 goto err_mem; 887 } 888 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 889 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 890 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 891 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 892 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 893 dev->caps.port_mask[i] = dev->caps.port_type[i]; 894 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 895 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, 896 &dev->caps.gid_table_len[i], 897 &dev->caps.pkey_table_len[i]); 898 if (err) 899 goto err_mem; 900 } 901 902 if (dev->caps.uar_page_size * (dev->caps.num_uars - 903 dev->caps.reserved_uars) > 904 pci_resource_len(dev->persist->pdev, 905 2)) { 906 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 907 dev->caps.uar_page_size * dev->caps.num_uars, 908 (unsigned long long) 909 pci_resource_len(dev->persist->pdev, 2)); 910 err = -ENOMEM; 911 goto err_mem; 912 } 913 914 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 915 dev->caps.eqe_size = 64; 916 dev->caps.eqe_factor = 1; 917 } else { 918 dev->caps.eqe_size = 32; 919 dev->caps.eqe_factor = 0; 920 } 921 922 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 923 dev->caps.cqe_size = 64; 924 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 925 } else { 926 dev->caps.cqe_size = 32; 927 } 928 929 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 930 dev->caps.eqe_size = hca_param.eqe_size; 931 dev->caps.eqe_factor = 0; 932 } 933 934 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 935 dev->caps.cqe_size = hca_param.cqe_size; 936 /* User still need to know when CQE > 32B */ 937 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 938 } 939 940 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 941 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 942 943 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 944 mlx4_dbg(dev, "RSS support for IP fragments is %s\n", 945 hca_param.rss_ip_frags ? "on" : "off"); 946 947 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 948 dev->caps.bf_reg_size) 949 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 950 951 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 952 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 953 954 return 0; 955 956 err_mem: 957 kfree(dev->caps.qp0_qkey); 958 kfree(dev->caps.qp0_tunnel); 959 kfree(dev->caps.qp0_proxy); 960 kfree(dev->caps.qp1_tunnel); 961 kfree(dev->caps.qp1_proxy); 962 dev->caps.qp0_qkey = NULL; 963 dev->caps.qp0_tunnel = NULL; 964 dev->caps.qp0_proxy = NULL; 965 dev->caps.qp1_tunnel = NULL; 966 dev->caps.qp1_proxy = NULL; 967 968 return err; 969 } 970 971 static void mlx4_request_modules(struct mlx4_dev *dev) 972 { 973 int port; 974 int has_ib_port = false; 975 int has_eth_port = false; 976 #define EN_DRV_NAME "mlx4_en" 977 #define IB_DRV_NAME "mlx4_ib" 978 979 for (port = 1; port <= dev->caps.num_ports; port++) { 980 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 981 has_ib_port = true; 982 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 983 has_eth_port = true; 984 } 985 986 if (has_eth_port) 987 request_module_nowait(EN_DRV_NAME); 988 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 989 request_module_nowait(IB_DRV_NAME); 990 } 991 992 /* 993 * Change the port configuration of the device. 994 * Every user of this function must hold the port mutex. 995 */ 996 int mlx4_change_port_types(struct mlx4_dev *dev, 997 enum mlx4_port_type *port_types) 998 { 999 int err = 0; 1000 int change = 0; 1001 int port; 1002 1003 for (port = 0; port < dev->caps.num_ports; port++) { 1004 /* Change the port type only if the new type is different 1005 * from the current, and not set to Auto */ 1006 if (port_types[port] != dev->caps.port_type[port + 1]) 1007 change = 1; 1008 } 1009 if (change) { 1010 mlx4_unregister_device(dev); 1011 for (port = 1; port <= dev->caps.num_ports; port++) { 1012 mlx4_CLOSE_PORT(dev, port); 1013 dev->caps.port_type[port] = port_types[port - 1]; 1014 err = mlx4_SET_PORT(dev, port, -1); 1015 if (err) { 1016 mlx4_err(dev, "Failed to set port %d, aborting\n", 1017 port); 1018 goto out; 1019 } 1020 } 1021 mlx4_set_port_mask(dev); 1022 err = mlx4_register_device(dev); 1023 if (err) { 1024 mlx4_err(dev, "Failed to register device\n"); 1025 goto out; 1026 } 1027 mlx4_request_modules(dev); 1028 } 1029 1030 out: 1031 return err; 1032 } 1033 1034 static ssize_t show_port_type(struct device *dev, 1035 struct device_attribute *attr, 1036 char *buf) 1037 { 1038 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1039 port_attr); 1040 struct mlx4_dev *mdev = info->dev; 1041 char type[8]; 1042 1043 sprintf(type, "%s", 1044 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 1045 "ib" : "eth"); 1046 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 1047 sprintf(buf, "auto (%s)\n", type); 1048 else 1049 sprintf(buf, "%s\n", type); 1050 1051 return strlen(buf); 1052 } 1053 1054 static ssize_t set_port_type(struct device *dev, 1055 struct device_attribute *attr, 1056 const char *buf, size_t count) 1057 { 1058 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1059 port_attr); 1060 struct mlx4_dev *mdev = info->dev; 1061 struct mlx4_priv *priv = mlx4_priv(mdev); 1062 enum mlx4_port_type types[MLX4_MAX_PORTS]; 1063 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 1064 static DEFINE_MUTEX(set_port_type_mutex); 1065 int i; 1066 int err = 0; 1067 1068 mutex_lock(&set_port_type_mutex); 1069 1070 if (!strcmp(buf, "ib\n")) 1071 info->tmp_type = MLX4_PORT_TYPE_IB; 1072 else if (!strcmp(buf, "eth\n")) 1073 info->tmp_type = MLX4_PORT_TYPE_ETH; 1074 else if (!strcmp(buf, "auto\n")) 1075 info->tmp_type = MLX4_PORT_TYPE_AUTO; 1076 else { 1077 mlx4_err(mdev, "%s is not supported port type\n", buf); 1078 err = -EINVAL; 1079 goto err_out; 1080 } 1081 1082 mlx4_stop_sense(mdev); 1083 mutex_lock(&priv->port_mutex); 1084 /* Possible type is always the one that was delivered */ 1085 mdev->caps.possible_type[info->port] = info->tmp_type; 1086 1087 for (i = 0; i < mdev->caps.num_ports; i++) { 1088 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 1089 mdev->caps.possible_type[i+1]; 1090 if (types[i] == MLX4_PORT_TYPE_AUTO) 1091 types[i] = mdev->caps.port_type[i+1]; 1092 } 1093 1094 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 1095 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 1096 for (i = 1; i <= mdev->caps.num_ports; i++) { 1097 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 1098 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 1099 err = -EINVAL; 1100 } 1101 } 1102 } 1103 if (err) { 1104 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); 1105 goto out; 1106 } 1107 1108 mlx4_do_sense_ports(mdev, new_types, types); 1109 1110 err = mlx4_check_port_params(mdev, new_types); 1111 if (err) 1112 goto out; 1113 1114 /* We are about to apply the changes after the configuration 1115 * was verified, no need to remember the temporary types 1116 * any more */ 1117 for (i = 0; i < mdev->caps.num_ports; i++) 1118 priv->port[i + 1].tmp_type = 0; 1119 1120 err = mlx4_change_port_types(mdev, new_types); 1121 1122 out: 1123 mlx4_start_sense(mdev); 1124 mutex_unlock(&priv->port_mutex); 1125 err_out: 1126 mutex_unlock(&set_port_type_mutex); 1127 1128 return err ? err : count; 1129 } 1130 1131 enum ibta_mtu { 1132 IB_MTU_256 = 1, 1133 IB_MTU_512 = 2, 1134 IB_MTU_1024 = 3, 1135 IB_MTU_2048 = 4, 1136 IB_MTU_4096 = 5 1137 }; 1138 1139 static inline int int_to_ibta_mtu(int mtu) 1140 { 1141 switch (mtu) { 1142 case 256: return IB_MTU_256; 1143 case 512: return IB_MTU_512; 1144 case 1024: return IB_MTU_1024; 1145 case 2048: return IB_MTU_2048; 1146 case 4096: return IB_MTU_4096; 1147 default: return -1; 1148 } 1149 } 1150 1151 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 1152 { 1153 switch (mtu) { 1154 case IB_MTU_256: return 256; 1155 case IB_MTU_512: return 512; 1156 case IB_MTU_1024: return 1024; 1157 case IB_MTU_2048: return 2048; 1158 case IB_MTU_4096: return 4096; 1159 default: return -1; 1160 } 1161 } 1162 1163 static ssize_t show_port_ib_mtu(struct device *dev, 1164 struct device_attribute *attr, 1165 char *buf) 1166 { 1167 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1168 port_mtu_attr); 1169 struct mlx4_dev *mdev = info->dev; 1170 1171 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 1172 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1173 1174 sprintf(buf, "%d\n", 1175 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 1176 return strlen(buf); 1177 } 1178 1179 static ssize_t set_port_ib_mtu(struct device *dev, 1180 struct device_attribute *attr, 1181 const char *buf, size_t count) 1182 { 1183 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1184 port_mtu_attr); 1185 struct mlx4_dev *mdev = info->dev; 1186 struct mlx4_priv *priv = mlx4_priv(mdev); 1187 int err, port, mtu, ibta_mtu = -1; 1188 1189 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 1190 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1191 return -EINVAL; 1192 } 1193 1194 err = kstrtoint(buf, 0, &mtu); 1195 if (!err) 1196 ibta_mtu = int_to_ibta_mtu(mtu); 1197 1198 if (err || ibta_mtu < 0) { 1199 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 1200 return -EINVAL; 1201 } 1202 1203 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 1204 1205 mlx4_stop_sense(mdev); 1206 mutex_lock(&priv->port_mutex); 1207 mlx4_unregister_device(mdev); 1208 for (port = 1; port <= mdev->caps.num_ports; port++) { 1209 mlx4_CLOSE_PORT(mdev, port); 1210 err = mlx4_SET_PORT(mdev, port, -1); 1211 if (err) { 1212 mlx4_err(mdev, "Failed to set port %d, aborting\n", 1213 port); 1214 goto err_set_port; 1215 } 1216 } 1217 err = mlx4_register_device(mdev); 1218 err_set_port: 1219 mutex_unlock(&priv->port_mutex); 1220 mlx4_start_sense(mdev); 1221 return err ? err : count; 1222 } 1223 1224 /* bond for multi-function device */ 1225 #define MAX_MF_BOND_ALLOWED_SLAVES 63 1226 static int mlx4_mf_bond(struct mlx4_dev *dev) 1227 { 1228 int err = 0; 1229 struct mlx4_slaves_pport slaves_port1; 1230 struct mlx4_slaves_pport slaves_port2; 1231 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); 1232 1233 slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1); 1234 slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2); 1235 bitmap_and(slaves_port_1_2, 1236 slaves_port1.slaves, slaves_port2.slaves, 1237 dev->persist->num_vfs + 1); 1238 1239 /* only single port vfs are allowed */ 1240 if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) { 1241 mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n"); 1242 return -EINVAL; 1243 } 1244 1245 /* limit on maximum allowed VFs */ 1246 if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + 1247 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > 1248 MAX_MF_BOND_ALLOWED_SLAVES) 1249 return -EINVAL; 1250 1251 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { 1252 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); 1253 return -EINVAL; 1254 } 1255 1256 err = mlx4_bond_mac_table(dev); 1257 if (err) 1258 return err; 1259 err = mlx4_bond_vlan_table(dev); 1260 if (err) 1261 goto err1; 1262 err = mlx4_bond_fs_rules(dev); 1263 if (err) 1264 goto err2; 1265 1266 return 0; 1267 err2: 1268 (void)mlx4_unbond_vlan_table(dev); 1269 err1: 1270 (void)mlx4_unbond_mac_table(dev); 1271 return err; 1272 } 1273 1274 static int mlx4_mf_unbond(struct mlx4_dev *dev) 1275 { 1276 int ret, ret1; 1277 1278 ret = mlx4_unbond_fs_rules(dev); 1279 if (ret) 1280 mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); 1281 ret1 = mlx4_unbond_mac_table(dev); 1282 if (ret1) { 1283 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); 1284 ret = ret1; 1285 } 1286 ret1 = mlx4_unbond_vlan_table(dev); 1287 if (ret1) { 1288 mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1); 1289 ret = ret1; 1290 } 1291 return ret; 1292 } 1293 1294 int mlx4_bond(struct mlx4_dev *dev) 1295 { 1296 int ret = 0; 1297 struct mlx4_priv *priv = mlx4_priv(dev); 1298 1299 mutex_lock(&priv->bond_mutex); 1300 1301 if (!mlx4_is_bonded(dev)) { 1302 ret = mlx4_do_bond(dev, true); 1303 if (ret) 1304 mlx4_err(dev, "Failed to bond device: %d\n", ret); 1305 if (!ret && mlx4_is_master(dev)) { 1306 ret = mlx4_mf_bond(dev); 1307 if (ret) { 1308 mlx4_err(dev, "bond for multifunction failed\n"); 1309 mlx4_do_bond(dev, false); 1310 } 1311 } 1312 } 1313 1314 mutex_unlock(&priv->bond_mutex); 1315 if (!ret) 1316 mlx4_dbg(dev, "Device is bonded\n"); 1317 1318 return ret; 1319 } 1320 EXPORT_SYMBOL_GPL(mlx4_bond); 1321 1322 int mlx4_unbond(struct mlx4_dev *dev) 1323 { 1324 int ret = 0; 1325 struct mlx4_priv *priv = mlx4_priv(dev); 1326 1327 mutex_lock(&priv->bond_mutex); 1328 1329 if (mlx4_is_bonded(dev)) { 1330 int ret2 = 0; 1331 1332 ret = mlx4_do_bond(dev, false); 1333 if (ret) 1334 mlx4_err(dev, "Failed to unbond device: %d\n", ret); 1335 if (mlx4_is_master(dev)) 1336 ret2 = mlx4_mf_unbond(dev); 1337 if (ret2) { 1338 mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2); 1339 ret = ret2; 1340 } 1341 } 1342 1343 mutex_unlock(&priv->bond_mutex); 1344 if (!ret) 1345 mlx4_dbg(dev, "Device is unbonded\n"); 1346 1347 return ret; 1348 } 1349 EXPORT_SYMBOL_GPL(mlx4_unbond); 1350 1351 1352 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) 1353 { 1354 u8 port1 = v2p->port1; 1355 u8 port2 = v2p->port2; 1356 struct mlx4_priv *priv = mlx4_priv(dev); 1357 int err; 1358 1359 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) 1360 return -ENOTSUPP; 1361 1362 mutex_lock(&priv->bond_mutex); 1363 1364 /* zero means keep current mapping for this port */ 1365 if (port1 == 0) 1366 port1 = priv->v2p.port1; 1367 if (port2 == 0) 1368 port2 = priv->v2p.port2; 1369 1370 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || 1371 (port2 < 1) || (port2 > MLX4_MAX_PORTS) || 1372 (port1 == 2 && port2 == 1)) { 1373 /* besides boundary checks cross mapping makes 1374 * no sense and therefore not allowed */ 1375 err = -EINVAL; 1376 } else if ((port1 == priv->v2p.port1) && 1377 (port2 == priv->v2p.port2)) { 1378 err = 0; 1379 } else { 1380 err = mlx4_virt2phy_port_map(dev, port1, port2); 1381 if (!err) { 1382 mlx4_dbg(dev, "port map changed: [%d][%d]\n", 1383 port1, port2); 1384 priv->v2p.port1 = port1; 1385 priv->v2p.port2 = port2; 1386 } else { 1387 mlx4_err(dev, "Failed to change port mape: %d\n", err); 1388 } 1389 } 1390 1391 mutex_unlock(&priv->bond_mutex); 1392 return err; 1393 } 1394 EXPORT_SYMBOL_GPL(mlx4_port_map_set); 1395 1396 static int mlx4_load_fw(struct mlx4_dev *dev) 1397 { 1398 struct mlx4_priv *priv = mlx4_priv(dev); 1399 int err; 1400 1401 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 1402 GFP_HIGHUSER | __GFP_NOWARN, 0); 1403 if (!priv->fw.fw_icm) { 1404 mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); 1405 return -ENOMEM; 1406 } 1407 1408 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 1409 if (err) { 1410 mlx4_err(dev, "MAP_FA command failed, aborting\n"); 1411 goto err_free; 1412 } 1413 1414 err = mlx4_RUN_FW(dev); 1415 if (err) { 1416 mlx4_err(dev, "RUN_FW command failed, aborting\n"); 1417 goto err_unmap_fa; 1418 } 1419 1420 return 0; 1421 1422 err_unmap_fa: 1423 mlx4_UNMAP_FA(dev); 1424 1425 err_free: 1426 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1427 return err; 1428 } 1429 1430 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 1431 int cmpt_entry_sz) 1432 { 1433 struct mlx4_priv *priv = mlx4_priv(dev); 1434 int err; 1435 int num_eqs; 1436 1437 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 1438 cmpt_base + 1439 ((u64) (MLX4_CMPT_TYPE_QP * 1440 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1441 cmpt_entry_sz, dev->caps.num_qps, 1442 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1443 0, 0); 1444 if (err) 1445 goto err; 1446 1447 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 1448 cmpt_base + 1449 ((u64) (MLX4_CMPT_TYPE_SRQ * 1450 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1451 cmpt_entry_sz, dev->caps.num_srqs, 1452 dev->caps.reserved_srqs, 0, 0); 1453 if (err) 1454 goto err_qp; 1455 1456 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 1457 cmpt_base + 1458 ((u64) (MLX4_CMPT_TYPE_CQ * 1459 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1460 cmpt_entry_sz, dev->caps.num_cqs, 1461 dev->caps.reserved_cqs, 0, 0); 1462 if (err) 1463 goto err_srq; 1464 1465 num_eqs = dev->phys_caps.num_phys_eqs; 1466 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 1467 cmpt_base + 1468 ((u64) (MLX4_CMPT_TYPE_EQ * 1469 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1470 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 1471 if (err) 1472 goto err_cq; 1473 1474 return 0; 1475 1476 err_cq: 1477 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1478 1479 err_srq: 1480 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1481 1482 err_qp: 1483 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1484 1485 err: 1486 return err; 1487 } 1488 1489 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 1490 struct mlx4_init_hca_param *init_hca, u64 icm_size) 1491 { 1492 struct mlx4_priv *priv = mlx4_priv(dev); 1493 u64 aux_pages; 1494 int num_eqs; 1495 int err; 1496 1497 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1498 if (err) { 1499 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); 1500 return err; 1501 } 1502 1503 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", 1504 (unsigned long long) icm_size >> 10, 1505 (unsigned long long) aux_pages << 2); 1506 1507 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1508 GFP_HIGHUSER | __GFP_NOWARN, 0); 1509 if (!priv->fw.aux_icm) { 1510 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); 1511 return -ENOMEM; 1512 } 1513 1514 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1515 if (err) { 1516 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); 1517 goto err_free_aux; 1518 } 1519 1520 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1521 if (err) { 1522 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); 1523 goto err_unmap_aux; 1524 } 1525 1526 1527 num_eqs = dev->phys_caps.num_phys_eqs; 1528 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 1529 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1530 num_eqs, num_eqs, 0, 0); 1531 if (err) { 1532 mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); 1533 goto err_unmap_cmpt; 1534 } 1535 1536 /* 1537 * Reserved MTT entries must be aligned up to a cacheline 1538 * boundary, since the FW will write to them, while the driver 1539 * writes to all other MTT entries. (The variable 1540 * dev->caps.mtt_entry_sz below is really the MTT segment 1541 * size, not the raw entry size) 1542 */ 1543 dev->caps.reserved_mtts = 1544 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 1545 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 1546 1547 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 1548 init_hca->mtt_base, 1549 dev->caps.mtt_entry_sz, 1550 dev->caps.num_mtts, 1551 dev->caps.reserved_mtts, 1, 0); 1552 if (err) { 1553 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); 1554 goto err_unmap_eq; 1555 } 1556 1557 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 1558 init_hca->dmpt_base, 1559 dev_cap->dmpt_entry_sz, 1560 dev->caps.num_mpts, 1561 dev->caps.reserved_mrws, 1, 1); 1562 if (err) { 1563 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); 1564 goto err_unmap_mtt; 1565 } 1566 1567 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1568 init_hca->qpc_base, 1569 dev_cap->qpc_entry_sz, 1570 dev->caps.num_qps, 1571 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1572 0, 0); 1573 if (err) { 1574 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); 1575 goto err_unmap_dmpt; 1576 } 1577 1578 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1579 init_hca->auxc_base, 1580 dev_cap->aux_entry_sz, 1581 dev->caps.num_qps, 1582 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1583 0, 0); 1584 if (err) { 1585 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); 1586 goto err_unmap_qp; 1587 } 1588 1589 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1590 init_hca->altc_base, 1591 dev_cap->altc_entry_sz, 1592 dev->caps.num_qps, 1593 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1594 0, 0); 1595 if (err) { 1596 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); 1597 goto err_unmap_auxc; 1598 } 1599 1600 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1601 init_hca->rdmarc_base, 1602 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1603 dev->caps.num_qps, 1604 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1605 0, 0); 1606 if (err) { 1607 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1608 goto err_unmap_altc; 1609 } 1610 1611 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1612 init_hca->cqc_base, 1613 dev_cap->cqc_entry_sz, 1614 dev->caps.num_cqs, 1615 dev->caps.reserved_cqs, 0, 0); 1616 if (err) { 1617 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); 1618 goto err_unmap_rdmarc; 1619 } 1620 1621 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1622 init_hca->srqc_base, 1623 dev_cap->srq_entry_sz, 1624 dev->caps.num_srqs, 1625 dev->caps.reserved_srqs, 0, 0); 1626 if (err) { 1627 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); 1628 goto err_unmap_cq; 1629 } 1630 1631 /* 1632 * For flow steering device managed mode it is required to use 1633 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1634 * required, but for simplicity just map the whole multicast 1635 * group table now. The table isn't very big and it's a lot 1636 * easier than trying to track ref counts. 1637 */ 1638 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1639 init_hca->mc_base, 1640 mlx4_get_mgm_entry_size(dev), 1641 dev->caps.num_mgms + dev->caps.num_amgms, 1642 dev->caps.num_mgms + dev->caps.num_amgms, 1643 0, 0); 1644 if (err) { 1645 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); 1646 goto err_unmap_srq; 1647 } 1648 1649 return 0; 1650 1651 err_unmap_srq: 1652 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1653 1654 err_unmap_cq: 1655 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1656 1657 err_unmap_rdmarc: 1658 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1659 1660 err_unmap_altc: 1661 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1662 1663 err_unmap_auxc: 1664 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1665 1666 err_unmap_qp: 1667 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1668 1669 err_unmap_dmpt: 1670 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1671 1672 err_unmap_mtt: 1673 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1674 1675 err_unmap_eq: 1676 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1677 1678 err_unmap_cmpt: 1679 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1680 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1681 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1682 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1683 1684 err_unmap_aux: 1685 mlx4_UNMAP_ICM_AUX(dev); 1686 1687 err_free_aux: 1688 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1689 1690 return err; 1691 } 1692 1693 static void mlx4_free_icms(struct mlx4_dev *dev) 1694 { 1695 struct mlx4_priv *priv = mlx4_priv(dev); 1696 1697 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1698 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1699 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1700 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1701 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1702 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1703 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1704 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1705 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1706 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1707 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1708 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1709 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1710 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1711 1712 mlx4_UNMAP_ICM_AUX(dev); 1713 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1714 } 1715 1716 static void mlx4_slave_exit(struct mlx4_dev *dev) 1717 { 1718 struct mlx4_priv *priv = mlx4_priv(dev); 1719 1720 mutex_lock(&priv->cmd.slave_cmd_mutex); 1721 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 1722 MLX4_COMM_TIME)) 1723 mlx4_warn(dev, "Failed to close slave function\n"); 1724 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1725 } 1726 1727 static int map_bf_area(struct mlx4_dev *dev) 1728 { 1729 struct mlx4_priv *priv = mlx4_priv(dev); 1730 resource_size_t bf_start; 1731 resource_size_t bf_len; 1732 int err = 0; 1733 1734 if (!dev->caps.bf_reg_size) 1735 return -ENXIO; 1736 1737 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1738 (dev->caps.num_uars << PAGE_SHIFT); 1739 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1740 (dev->caps.num_uars << PAGE_SHIFT); 1741 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1742 if (!priv->bf_mapping) 1743 err = -ENOMEM; 1744 1745 return err; 1746 } 1747 1748 static void unmap_bf_area(struct mlx4_dev *dev) 1749 { 1750 if (mlx4_priv(dev)->bf_mapping) 1751 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1752 } 1753 1754 cycle_t mlx4_read_clock(struct mlx4_dev *dev) 1755 { 1756 u32 clockhi, clocklo, clockhi1; 1757 cycle_t cycles; 1758 int i; 1759 struct mlx4_priv *priv = mlx4_priv(dev); 1760 1761 for (i = 0; i < 10; i++) { 1762 clockhi = swab32(readl(priv->clock_mapping)); 1763 clocklo = swab32(readl(priv->clock_mapping + 4)); 1764 clockhi1 = swab32(readl(priv->clock_mapping)); 1765 if (clockhi == clockhi1) 1766 break; 1767 } 1768 1769 cycles = (u64) clockhi << 32 | (u64) clocklo; 1770 1771 return cycles; 1772 } 1773 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1774 1775 1776 static int map_internal_clock(struct mlx4_dev *dev) 1777 { 1778 struct mlx4_priv *priv = mlx4_priv(dev); 1779 1780 priv->clock_mapping = 1781 ioremap(pci_resource_start(dev->persist->pdev, 1782 priv->fw.clock_bar) + 1783 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1784 1785 if (!priv->clock_mapping) 1786 return -ENOMEM; 1787 1788 return 0; 1789 } 1790 1791 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1792 struct mlx4_clock_params *params) 1793 { 1794 struct mlx4_priv *priv = mlx4_priv(dev); 1795 1796 if (mlx4_is_slave(dev)) 1797 return -ENOTSUPP; 1798 1799 if (!params) 1800 return -EINVAL; 1801 1802 params->bar = priv->fw.clock_bar; 1803 params->offset = priv->fw.clock_offset; 1804 params->size = MLX4_CLOCK_SIZE; 1805 1806 return 0; 1807 } 1808 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); 1809 1810 static void unmap_internal_clock(struct mlx4_dev *dev) 1811 { 1812 struct mlx4_priv *priv = mlx4_priv(dev); 1813 1814 if (priv->clock_mapping) 1815 iounmap(priv->clock_mapping); 1816 } 1817 1818 static void mlx4_close_hca(struct mlx4_dev *dev) 1819 { 1820 unmap_internal_clock(dev); 1821 unmap_bf_area(dev); 1822 if (mlx4_is_slave(dev)) 1823 mlx4_slave_exit(dev); 1824 else { 1825 mlx4_CLOSE_HCA(dev, 0); 1826 mlx4_free_icms(dev); 1827 } 1828 } 1829 1830 static void mlx4_close_fw(struct mlx4_dev *dev) 1831 { 1832 if (!mlx4_is_slave(dev)) { 1833 mlx4_UNMAP_FA(dev); 1834 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1835 } 1836 } 1837 1838 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1839 { 1840 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1841 1842 u32 comm_flags; 1843 u32 offline_bit; 1844 unsigned long end; 1845 struct mlx4_priv *priv = mlx4_priv(dev); 1846 1847 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1848 while (time_before(jiffies, end)) { 1849 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1850 MLX4_COMM_CHAN_FLAGS)); 1851 offline_bit = (comm_flags & 1852 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1853 if (!offline_bit) 1854 return 0; 1855 /* There are cases as part of AER/Reset flow that PF needs 1856 * around 100 msec to load. We therefore sleep for 100 msec 1857 * to allow other tasks to make use of that CPU during this 1858 * time interval. 1859 */ 1860 msleep(100); 1861 } 1862 mlx4_err(dev, "Communication channel is offline.\n"); 1863 return -EIO; 1864 } 1865 1866 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1867 { 1868 #define COMM_CHAN_RST_OFFSET 0x1e 1869 1870 struct mlx4_priv *priv = mlx4_priv(dev); 1871 u32 comm_rst; 1872 u32 comm_caps; 1873 1874 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1875 MLX4_COMM_CHAN_CAPS)); 1876 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1877 1878 if (comm_rst) 1879 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1880 } 1881 1882 static int mlx4_init_slave(struct mlx4_dev *dev) 1883 { 1884 struct mlx4_priv *priv = mlx4_priv(dev); 1885 u64 dma = (u64) priv->mfunc.vhcr_dma; 1886 int ret_from_reset = 0; 1887 u32 slave_read; 1888 u32 cmd_channel_ver; 1889 1890 if (atomic_read(&pf_loading)) { 1891 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1892 return -EPROBE_DEFER; 1893 } 1894 1895 mutex_lock(&priv->cmd.slave_cmd_mutex); 1896 priv->cmd.max_cmds = 1; 1897 if (mlx4_comm_check_offline(dev)) { 1898 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1899 goto err_offline; 1900 } 1901 1902 mlx4_reset_vf_support(dev); 1903 mlx4_warn(dev, "Sending reset\n"); 1904 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1905 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1906 /* if we are in the middle of flr the slave will try 1907 * NUM_OF_RESET_RETRIES times before leaving.*/ 1908 if (ret_from_reset) { 1909 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1910 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1911 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1912 return -EPROBE_DEFER; 1913 } else 1914 goto err; 1915 } 1916 1917 /* check the driver version - the slave I/F revision 1918 * must match the master's */ 1919 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1920 cmd_channel_ver = mlx4_comm_get_version(); 1921 1922 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1923 MLX4_COMM_GET_IF_REV(slave_read)) { 1924 mlx4_err(dev, "slave driver version is not supported by the master\n"); 1925 goto err; 1926 } 1927 1928 mlx4_warn(dev, "Sending vhcr0\n"); 1929 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1930 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1931 goto err; 1932 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1933 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1934 goto err; 1935 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1936 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1937 goto err; 1938 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 1939 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1940 goto err; 1941 1942 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1943 return 0; 1944 1945 err: 1946 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 1947 err_offline: 1948 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1949 return -EIO; 1950 } 1951 1952 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 1953 { 1954 int i; 1955 1956 for (i = 1; i <= dev->caps.num_ports; i++) { 1957 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 1958 dev->caps.gid_table_len[i] = 1959 mlx4_get_slave_num_gids(dev, 0, i); 1960 else 1961 dev->caps.gid_table_len[i] = 1; 1962 dev->caps.pkey_table_len[i] = 1963 dev->phys_caps.pkey_phys_table_len[i] - 1; 1964 } 1965 } 1966 1967 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 1968 { 1969 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 1970 1971 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 1972 i++) { 1973 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 1974 break; 1975 } 1976 1977 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 1978 } 1979 1980 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 1981 { 1982 switch (dmfs_high_steer_mode) { 1983 case MLX4_STEERING_DMFS_A0_DEFAULT: 1984 return "default performance"; 1985 1986 case MLX4_STEERING_DMFS_A0_DYNAMIC: 1987 return "dynamic hybrid mode"; 1988 1989 case MLX4_STEERING_DMFS_A0_STATIC: 1990 return "performance optimized for limited rule configuration (static)"; 1991 1992 case MLX4_STEERING_DMFS_A0_DISABLE: 1993 return "disabled performance optimized steering"; 1994 1995 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 1996 return "performance optimized steering not supported"; 1997 1998 default: 1999 return "Unrecognized mode"; 2000 } 2001 } 2002 2003 #define MLX4_DMFS_A0_STEERING (1UL << 2) 2004 2005 static void choose_steering_mode(struct mlx4_dev *dev, 2006 struct mlx4_dev_cap *dev_cap) 2007 { 2008 if (mlx4_log_num_mgm_entry_size <= 0) { 2009 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 2010 if (dev->caps.dmfs_high_steer_mode == 2011 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2012 mlx4_err(dev, "DMFS high rate mode not supported\n"); 2013 else 2014 dev->caps.dmfs_high_steer_mode = 2015 MLX4_STEERING_DMFS_A0_STATIC; 2016 } 2017 } 2018 2019 if (mlx4_log_num_mgm_entry_size <= 0 && 2020 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 2021 (!mlx4_is_mfunc(dev) || 2022 (dev_cap->fs_max_num_qp_per_entry >= 2023 (dev->persist->num_vfs + 1))) && 2024 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 2025 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 2026 dev->oper_log_mgm_entry_size = 2027 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 2028 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2029 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 2030 dev->caps.fs_log_max_ucast_qp_range_size = 2031 dev_cap->fs_log_max_ucast_qp_range_size; 2032 } else { 2033 if (dev->caps.dmfs_high_steer_mode != 2034 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2035 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 2036 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 2037 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2038 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 2039 else { 2040 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 2041 2042 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 2043 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2044 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 2045 } 2046 dev->oper_log_mgm_entry_size = 2047 mlx4_log_num_mgm_entry_size > 0 ? 2048 mlx4_log_num_mgm_entry_size : 2049 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 2050 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 2051 } 2052 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 2053 mlx4_steering_mode_str(dev->caps.steering_mode), 2054 dev->oper_log_mgm_entry_size, 2055 mlx4_log_num_mgm_entry_size); 2056 } 2057 2058 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 2059 struct mlx4_dev_cap *dev_cap) 2060 { 2061 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 2062 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 2063 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 2064 else 2065 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 2066 2067 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 2068 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 2069 } 2070 2071 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 2072 { 2073 int i; 2074 struct mlx4_port_cap port_cap; 2075 2076 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2077 return -EINVAL; 2078 2079 for (i = 1; i <= dev->caps.num_ports; i++) { 2080 if (mlx4_dev_port(dev, i, &port_cap)) { 2081 mlx4_err(dev, 2082 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 2083 } else if ((dev->caps.dmfs_high_steer_mode != 2084 MLX4_STEERING_DMFS_A0_DEFAULT) && 2085 (port_cap.dmfs_optimized_state == 2086 !!(dev->caps.dmfs_high_steer_mode == 2087 MLX4_STEERING_DMFS_A0_DISABLE))) { 2088 mlx4_err(dev, 2089 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 2090 dmfs_high_rate_steering_mode_str( 2091 dev->caps.dmfs_high_steer_mode), 2092 (port_cap.dmfs_optimized_state ? 2093 "enabled" : "disabled")); 2094 } 2095 } 2096 2097 return 0; 2098 } 2099 2100 static int mlx4_init_fw(struct mlx4_dev *dev) 2101 { 2102 struct mlx4_mod_stat_cfg mlx4_cfg; 2103 int err = 0; 2104 2105 if (!mlx4_is_slave(dev)) { 2106 err = mlx4_QUERY_FW(dev); 2107 if (err) { 2108 if (err == -EACCES) 2109 mlx4_info(dev, "non-primary physical function, skipping\n"); 2110 else 2111 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 2112 return err; 2113 } 2114 2115 err = mlx4_load_fw(dev); 2116 if (err) { 2117 mlx4_err(dev, "Failed to start FW, aborting\n"); 2118 return err; 2119 } 2120 2121 mlx4_cfg.log_pg_sz_m = 1; 2122 mlx4_cfg.log_pg_sz = 0; 2123 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 2124 if (err) 2125 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 2126 } 2127 2128 return err; 2129 } 2130 2131 static int mlx4_init_hca(struct mlx4_dev *dev) 2132 { 2133 struct mlx4_priv *priv = mlx4_priv(dev); 2134 struct mlx4_adapter adapter; 2135 struct mlx4_dev_cap dev_cap; 2136 struct mlx4_profile profile; 2137 struct mlx4_init_hca_param init_hca; 2138 u64 icm_size; 2139 struct mlx4_config_dev_params params; 2140 int err; 2141 2142 if (!mlx4_is_slave(dev)) { 2143 err = mlx4_dev_cap(dev, &dev_cap); 2144 if (err) { 2145 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 2146 return err; 2147 } 2148 2149 choose_steering_mode(dev, &dev_cap); 2150 choose_tunnel_offload_mode(dev, &dev_cap); 2151 2152 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 2153 mlx4_is_master(dev)) 2154 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 2155 2156 err = mlx4_get_phys_port_id(dev); 2157 if (err) 2158 mlx4_err(dev, "Fail to get physical port id\n"); 2159 2160 if (mlx4_is_master(dev)) 2161 mlx4_parav_master_pf_caps(dev); 2162 2163 if (mlx4_low_memory_profile()) { 2164 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n"); 2165 profile = low_mem_profile; 2166 } else { 2167 profile = default_profile; 2168 } 2169 if (dev->caps.steering_mode == 2170 MLX4_STEERING_MODE_DEVICE_MANAGED) 2171 profile.num_mcg = MLX4_FS_NUM_MCG; 2172 2173 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2174 &init_hca); 2175 if ((long long) icm_size < 0) { 2176 err = icm_size; 2177 return err; 2178 } 2179 2180 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2181 2182 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2183 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2184 init_hca.mw_enabled = 0; 2185 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2186 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2187 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2188 2189 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2190 if (err) 2191 return err; 2192 2193 err = mlx4_INIT_HCA(dev, &init_hca); 2194 if (err) { 2195 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2196 goto err_free_icm; 2197 } 2198 2199 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2200 err = mlx4_query_func(dev, &dev_cap); 2201 if (err < 0) { 2202 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2203 goto err_close; 2204 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2205 dev->caps.num_eqs = dev_cap.max_eqs; 2206 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2207 dev->caps.reserved_uars = dev_cap.reserved_uars; 2208 } 2209 } 2210 2211 /* 2212 * If TS is supported by FW 2213 * read HCA frequency by QUERY_HCA command 2214 */ 2215 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2216 memset(&init_hca, 0, sizeof(init_hca)); 2217 err = mlx4_QUERY_HCA(dev, &init_hca); 2218 if (err) { 2219 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2220 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2221 } else { 2222 dev->caps.hca_core_clock = 2223 init_hca.hca_core_clock; 2224 } 2225 2226 /* In case we got HCA frequency 0 - disable timestamping 2227 * to avoid dividing by zero 2228 */ 2229 if (!dev->caps.hca_core_clock) { 2230 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2231 mlx4_err(dev, 2232 "HCA frequency is 0 - timestamping is not supported\n"); 2233 } else if (map_internal_clock(dev)) { 2234 /* 2235 * Map internal clock, 2236 * in case of failure disable timestamping 2237 */ 2238 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2239 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); 2240 } 2241 } 2242 2243 if (dev->caps.dmfs_high_steer_mode != 2244 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2245 if (mlx4_validate_optimized_steering(dev)) 2246 mlx4_warn(dev, "Optimized steering validation failed\n"); 2247 2248 if (dev->caps.dmfs_high_steer_mode == 2249 MLX4_STEERING_DMFS_A0_DISABLE) { 2250 dev->caps.dmfs_high_rate_qpn_base = 2251 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2252 dev->caps.dmfs_high_rate_qpn_range = 2253 MLX4_A0_STEERING_TABLE_SIZE; 2254 } 2255 2256 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2257 dmfs_high_rate_steering_mode_str( 2258 dev->caps.dmfs_high_steer_mode)); 2259 } 2260 } else { 2261 err = mlx4_init_slave(dev); 2262 if (err) { 2263 if (err != -EPROBE_DEFER) 2264 mlx4_err(dev, "Failed to initialize slave\n"); 2265 return err; 2266 } 2267 2268 err = mlx4_slave_cap(dev); 2269 if (err) { 2270 mlx4_err(dev, "Failed to obtain slave caps\n"); 2271 goto err_close; 2272 } 2273 } 2274 2275 if (map_bf_area(dev)) 2276 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2277 2278 /*Only the master set the ports, all the rest got it from it.*/ 2279 if (!mlx4_is_slave(dev)) 2280 mlx4_set_port_mask(dev); 2281 2282 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2283 if (err) { 2284 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2285 goto unmap_bf; 2286 } 2287 2288 /* Query CONFIG_DEV parameters */ 2289 err = mlx4_config_dev_retrieval(dev, ¶ms); 2290 if (err && err != -ENOTSUPP) { 2291 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2292 } else if (!err) { 2293 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2294 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2295 } 2296 priv->eq_table.inta_pin = adapter.inta_pin; 2297 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2298 2299 return 0; 2300 2301 unmap_bf: 2302 unmap_internal_clock(dev); 2303 unmap_bf_area(dev); 2304 2305 if (mlx4_is_slave(dev)) { 2306 kfree(dev->caps.qp0_qkey); 2307 kfree(dev->caps.qp0_tunnel); 2308 kfree(dev->caps.qp0_proxy); 2309 kfree(dev->caps.qp1_tunnel); 2310 kfree(dev->caps.qp1_proxy); 2311 } 2312 2313 err_close: 2314 if (mlx4_is_slave(dev)) 2315 mlx4_slave_exit(dev); 2316 else 2317 mlx4_CLOSE_HCA(dev, 0); 2318 2319 err_free_icm: 2320 if (!mlx4_is_slave(dev)) 2321 mlx4_free_icms(dev); 2322 2323 return err; 2324 } 2325 2326 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2327 { 2328 struct mlx4_priv *priv = mlx4_priv(dev); 2329 int nent_pow2; 2330 2331 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2332 return -ENOENT; 2333 2334 if (!dev->caps.max_counters) 2335 return -ENOSPC; 2336 2337 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); 2338 /* reserve last counter index for sink counter */ 2339 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, 2340 nent_pow2 - 1, 0, 2341 nent_pow2 - dev->caps.max_counters + 1); 2342 } 2343 2344 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2345 { 2346 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2347 return; 2348 2349 if (!dev->caps.max_counters) 2350 return; 2351 2352 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2353 } 2354 2355 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) 2356 { 2357 struct mlx4_priv *priv = mlx4_priv(dev); 2358 int port; 2359 2360 for (port = 0; port < dev->caps.num_ports; port++) 2361 if (priv->def_counter[port] != -1) 2362 mlx4_counter_free(dev, priv->def_counter[port]); 2363 } 2364 2365 static int mlx4_allocate_default_counters(struct mlx4_dev *dev) 2366 { 2367 struct mlx4_priv *priv = mlx4_priv(dev); 2368 int port, err = 0; 2369 u32 idx; 2370 2371 for (port = 0; port < dev->caps.num_ports; port++) 2372 priv->def_counter[port] = -1; 2373 2374 for (port = 0; port < dev->caps.num_ports; port++) { 2375 err = mlx4_counter_alloc(dev, &idx); 2376 2377 if (!err || err == -ENOSPC) { 2378 priv->def_counter[port] = idx; 2379 } else if (err == -ENOENT) { 2380 err = 0; 2381 continue; 2382 } else if (mlx4_is_slave(dev) && err == -EINVAL) { 2383 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); 2384 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", 2385 MLX4_SINK_COUNTER_INDEX(dev)); 2386 err = 0; 2387 } else { 2388 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2389 __func__, port + 1, err); 2390 mlx4_cleanup_default_counters(dev); 2391 return err; 2392 } 2393 2394 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", 2395 __func__, priv->def_counter[port], port + 1); 2396 } 2397 2398 return err; 2399 } 2400 2401 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2402 { 2403 struct mlx4_priv *priv = mlx4_priv(dev); 2404 2405 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2406 return -ENOENT; 2407 2408 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2409 if (*idx == -1) { 2410 *idx = MLX4_SINK_COUNTER_INDEX(dev); 2411 return -ENOSPC; 2412 } 2413 2414 return 0; 2415 } 2416 2417 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2418 { 2419 u64 out_param; 2420 int err; 2421 2422 if (mlx4_is_mfunc(dev)) { 2423 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2424 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2425 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2426 if (!err) 2427 *idx = get_param_l(&out_param); 2428 2429 return err; 2430 } 2431 return __mlx4_counter_alloc(dev, idx); 2432 } 2433 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2434 2435 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, 2436 u8 counter_index) 2437 { 2438 struct mlx4_cmd_mailbox *if_stat_mailbox; 2439 int err; 2440 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; 2441 2442 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); 2443 if (IS_ERR(if_stat_mailbox)) 2444 return PTR_ERR(if_stat_mailbox); 2445 2446 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, 2447 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, 2448 MLX4_CMD_NATIVE); 2449 2450 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); 2451 return err; 2452 } 2453 2454 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2455 { 2456 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2457 return; 2458 2459 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) 2460 return; 2461 2462 __mlx4_clear_if_stat(dev, idx); 2463 2464 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2465 return; 2466 } 2467 2468 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2469 { 2470 u64 in_param = 0; 2471 2472 if (mlx4_is_mfunc(dev)) { 2473 set_param_l(&in_param, idx); 2474 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2475 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2476 MLX4_CMD_WRAPPED); 2477 return; 2478 } 2479 __mlx4_counter_free(dev, idx); 2480 } 2481 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2482 2483 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) 2484 { 2485 struct mlx4_priv *priv = mlx4_priv(dev); 2486 2487 return priv->def_counter[port - 1]; 2488 } 2489 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); 2490 2491 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) 2492 { 2493 struct mlx4_priv *priv = mlx4_priv(dev); 2494 2495 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2496 } 2497 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); 2498 2499 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) 2500 { 2501 struct mlx4_priv *priv = mlx4_priv(dev); 2502 2503 return priv->mfunc.master.vf_admin[entry].vport[port].guid; 2504 } 2505 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); 2506 2507 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) 2508 { 2509 struct mlx4_priv *priv = mlx4_priv(dev); 2510 __be64 guid; 2511 2512 /* hw GUID */ 2513 if (entry == 0) 2514 return; 2515 2516 get_random_bytes((char *)&guid, sizeof(guid)); 2517 guid &= ~(cpu_to_be64(1ULL << 56)); 2518 guid |= cpu_to_be64(1ULL << 57); 2519 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2520 } 2521 2522 static int mlx4_setup_hca(struct mlx4_dev *dev) 2523 { 2524 struct mlx4_priv *priv = mlx4_priv(dev); 2525 int err; 2526 int port; 2527 __be32 ib_port_default_caps; 2528 2529 err = mlx4_init_uar_table(dev); 2530 if (err) { 2531 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2532 return err; 2533 } 2534 2535 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2536 if (err) { 2537 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2538 goto err_uar_table_free; 2539 } 2540 2541 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2542 if (!priv->kar) { 2543 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2544 err = -ENOMEM; 2545 goto err_uar_free; 2546 } 2547 2548 err = mlx4_init_pd_table(dev); 2549 if (err) { 2550 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2551 goto err_kar_unmap; 2552 } 2553 2554 err = mlx4_init_xrcd_table(dev); 2555 if (err) { 2556 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2557 goto err_pd_table_free; 2558 } 2559 2560 err = mlx4_init_mr_table(dev); 2561 if (err) { 2562 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2563 goto err_xrcd_table_free; 2564 } 2565 2566 if (!mlx4_is_slave(dev)) { 2567 err = mlx4_init_mcg_table(dev); 2568 if (err) { 2569 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2570 goto err_mr_table_free; 2571 } 2572 err = mlx4_config_mad_demux(dev); 2573 if (err) { 2574 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2575 goto err_mcg_table_free; 2576 } 2577 } 2578 2579 err = mlx4_init_eq_table(dev); 2580 if (err) { 2581 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2582 goto err_mcg_table_free; 2583 } 2584 2585 err = mlx4_cmd_use_events(dev); 2586 if (err) { 2587 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2588 goto err_eq_table_free; 2589 } 2590 2591 err = mlx4_NOP(dev); 2592 if (err) { 2593 if (dev->flags & MLX4_FLAG_MSI_X) { 2594 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2595 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2596 mlx4_warn(dev, "Trying again without MSI-X\n"); 2597 } else { 2598 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2599 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2600 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2601 } 2602 2603 goto err_cmd_poll; 2604 } 2605 2606 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2607 2608 err = mlx4_init_cq_table(dev); 2609 if (err) { 2610 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2611 goto err_cmd_poll; 2612 } 2613 2614 err = mlx4_init_srq_table(dev); 2615 if (err) { 2616 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2617 goto err_cq_table_free; 2618 } 2619 2620 err = mlx4_init_qp_table(dev); 2621 if (err) { 2622 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2623 goto err_srq_table_free; 2624 } 2625 2626 if (!mlx4_is_slave(dev)) { 2627 err = mlx4_init_counters_table(dev); 2628 if (err && err != -ENOENT) { 2629 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2630 goto err_qp_table_free; 2631 } 2632 } 2633 2634 err = mlx4_allocate_default_counters(dev); 2635 if (err) { 2636 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); 2637 goto err_counters_table_free; 2638 } 2639 2640 if (!mlx4_is_slave(dev)) { 2641 for (port = 1; port <= dev->caps.num_ports; port++) { 2642 ib_port_default_caps = 0; 2643 err = mlx4_get_port_ib_caps(dev, port, 2644 &ib_port_default_caps); 2645 if (err) 2646 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2647 port, err); 2648 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2649 2650 /* initialize per-slave default ib port capabilities */ 2651 if (mlx4_is_master(dev)) { 2652 int i; 2653 for (i = 0; i < dev->num_slaves; i++) { 2654 if (i == mlx4_master_func_num(dev)) 2655 continue; 2656 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2657 ib_port_default_caps; 2658 } 2659 } 2660 2661 if (mlx4_is_mfunc(dev)) 2662 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2663 else 2664 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2665 2666 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2667 dev->caps.pkey_table_len[port] : -1); 2668 if (err) { 2669 mlx4_err(dev, "Failed to set port %d, aborting\n", 2670 port); 2671 goto err_default_countes_free; 2672 } 2673 } 2674 } 2675 2676 return 0; 2677 2678 err_default_countes_free: 2679 mlx4_cleanup_default_counters(dev); 2680 2681 err_counters_table_free: 2682 if (!mlx4_is_slave(dev)) 2683 mlx4_cleanup_counters_table(dev); 2684 2685 err_qp_table_free: 2686 mlx4_cleanup_qp_table(dev); 2687 2688 err_srq_table_free: 2689 mlx4_cleanup_srq_table(dev); 2690 2691 err_cq_table_free: 2692 mlx4_cleanup_cq_table(dev); 2693 2694 err_cmd_poll: 2695 mlx4_cmd_use_polling(dev); 2696 2697 err_eq_table_free: 2698 mlx4_cleanup_eq_table(dev); 2699 2700 err_mcg_table_free: 2701 if (!mlx4_is_slave(dev)) 2702 mlx4_cleanup_mcg_table(dev); 2703 2704 err_mr_table_free: 2705 mlx4_cleanup_mr_table(dev); 2706 2707 err_xrcd_table_free: 2708 mlx4_cleanup_xrcd_table(dev); 2709 2710 err_pd_table_free: 2711 mlx4_cleanup_pd_table(dev); 2712 2713 err_kar_unmap: 2714 iounmap(priv->kar); 2715 2716 err_uar_free: 2717 mlx4_uar_free(dev, &priv->driver_uar); 2718 2719 err_uar_table_free: 2720 mlx4_cleanup_uar_table(dev); 2721 return err; 2722 } 2723 2724 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) 2725 { 2726 int requested_cpu = 0; 2727 struct mlx4_priv *priv = mlx4_priv(dev); 2728 struct mlx4_eq *eq; 2729 int off = 0; 2730 int i; 2731 2732 if (eqn > dev->caps.num_comp_vectors) 2733 return -EINVAL; 2734 2735 for (i = 1; i < port; i++) 2736 off += mlx4_get_eqs_per_port(dev, i); 2737 2738 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); 2739 2740 /* Meaning EQs are shared, and this call comes from the second port */ 2741 if (requested_cpu < 0) 2742 return 0; 2743 2744 eq = &priv->eq_table.eq[eqn]; 2745 2746 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL)) 2747 return -ENOMEM; 2748 2749 cpumask_set_cpu(requested_cpu, eq->affinity_mask); 2750 2751 return 0; 2752 } 2753 2754 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2755 { 2756 struct mlx4_priv *priv = mlx4_priv(dev); 2757 struct msix_entry *entries; 2758 int i; 2759 int port = 0; 2760 2761 if (msi_x) { 2762 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2763 2764 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2765 nreq); 2766 if (nreq > MAX_MSIX) 2767 nreq = MAX_MSIX; 2768 2769 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2770 if (!entries) 2771 goto no_msi; 2772 2773 for (i = 0; i < nreq; ++i) 2774 entries[i].entry = i; 2775 2776 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2777 nreq); 2778 2779 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { 2780 kfree(entries); 2781 goto no_msi; 2782 } 2783 /* 1 is reserved for events (asyncrounous EQ) */ 2784 dev->caps.num_comp_vectors = nreq - 1; 2785 2786 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; 2787 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 2788 dev->caps.num_ports); 2789 2790 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 2791 if (i == MLX4_EQ_ASYNC) 2792 continue; 2793 2794 priv->eq_table.eq[i].irq = 2795 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 2796 2797 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { 2798 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2799 dev->caps.num_ports); 2800 /* We don't set affinity hint when there 2801 * aren't enough EQs 2802 */ 2803 } else { 2804 set_bit(port, 2805 priv->eq_table.eq[i].actv_ports.ports); 2806 if (mlx4_init_affinity_hint(dev, port + 1, i)) 2807 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", 2808 i); 2809 } 2810 /* We divide the Eqs evenly between the two ports. 2811 * (dev->caps.num_comp_vectors / dev->caps.num_ports) 2812 * refers to the number of Eqs per port 2813 * (i.e eqs_per_port). Theoretically, we would like to 2814 * write something like (i + 1) % eqs_per_port == 0. 2815 * However, since there's an asynchronous Eq, we have 2816 * to skip over it by comparing this condition to 2817 * !!((i + 1) > MLX4_EQ_ASYNC). 2818 */ 2819 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && 2820 ((i + 1) % 2821 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == 2822 !!((i + 1) > MLX4_EQ_ASYNC)) 2823 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, 2824 * everything is shared anyway. 2825 */ 2826 port++; 2827 } 2828 2829 dev->flags |= MLX4_FLAG_MSI_X; 2830 2831 kfree(entries); 2832 return; 2833 } 2834 2835 no_msi: 2836 dev->caps.num_comp_vectors = 1; 2837 2838 BUG_ON(MLX4_EQ_ASYNC >= 2); 2839 for (i = 0; i < 2; ++i) { 2840 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2841 if (i != MLX4_EQ_ASYNC) { 2842 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2843 dev->caps.num_ports); 2844 } 2845 } 2846 } 2847 2848 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2849 { 2850 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2851 int err = 0; 2852 2853 info->dev = dev; 2854 info->port = port; 2855 if (!mlx4_is_slave(dev)) { 2856 mlx4_init_mac_table(dev, &info->mac_table); 2857 mlx4_init_vlan_table(dev, &info->vlan_table); 2858 mlx4_init_roce_gid_table(dev, &info->gid_table); 2859 info->base_qpn = mlx4_get_base_qpn(dev, port); 2860 } 2861 2862 sprintf(info->dev_name, "mlx4_port%d", port); 2863 info->port_attr.attr.name = info->dev_name; 2864 if (mlx4_is_mfunc(dev)) 2865 info->port_attr.attr.mode = S_IRUGO; 2866 else { 2867 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2868 info->port_attr.store = set_port_type; 2869 } 2870 info->port_attr.show = show_port_type; 2871 sysfs_attr_init(&info->port_attr.attr); 2872 2873 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2874 if (err) { 2875 mlx4_err(dev, "Failed to create file for port %d\n", port); 2876 info->port = -1; 2877 } 2878 2879 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2880 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2881 if (mlx4_is_mfunc(dev)) 2882 info->port_mtu_attr.attr.mode = S_IRUGO; 2883 else { 2884 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2885 info->port_mtu_attr.store = set_port_ib_mtu; 2886 } 2887 info->port_mtu_attr.show = show_port_ib_mtu; 2888 sysfs_attr_init(&info->port_mtu_attr.attr); 2889 2890 err = device_create_file(&dev->persist->pdev->dev, 2891 &info->port_mtu_attr); 2892 if (err) { 2893 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2894 device_remove_file(&info->dev->persist->pdev->dev, 2895 &info->port_attr); 2896 info->port = -1; 2897 } 2898 2899 return err; 2900 } 2901 2902 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2903 { 2904 if (info->port < 0) 2905 return; 2906 2907 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2908 device_remove_file(&info->dev->persist->pdev->dev, 2909 &info->port_mtu_attr); 2910 #ifdef CONFIG_RFS_ACCEL 2911 free_irq_cpu_rmap(info->rmap); 2912 info->rmap = NULL; 2913 #endif 2914 } 2915 2916 static int mlx4_init_steering(struct mlx4_dev *dev) 2917 { 2918 struct mlx4_priv *priv = mlx4_priv(dev); 2919 int num_entries = dev->caps.num_ports; 2920 int i, j; 2921 2922 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2923 if (!priv->steer) 2924 return -ENOMEM; 2925 2926 for (i = 0; i < num_entries; i++) 2927 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2928 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2929 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2930 } 2931 return 0; 2932 } 2933 2934 static void mlx4_clear_steering(struct mlx4_dev *dev) 2935 { 2936 struct mlx4_priv *priv = mlx4_priv(dev); 2937 struct mlx4_steer_index *entry, *tmp_entry; 2938 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2939 int num_entries = dev->caps.num_ports; 2940 int i, j; 2941 2942 for (i = 0; i < num_entries; i++) { 2943 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2944 list_for_each_entry_safe(pqp, tmp_pqp, 2945 &priv->steer[i].promisc_qps[j], 2946 list) { 2947 list_del(&pqp->list); 2948 kfree(pqp); 2949 } 2950 list_for_each_entry_safe(entry, tmp_entry, 2951 &priv->steer[i].steer_entries[j], 2952 list) { 2953 list_del(&entry->list); 2954 list_for_each_entry_safe(pqp, tmp_pqp, 2955 &entry->duplicates, 2956 list) { 2957 list_del(&pqp->list); 2958 kfree(pqp); 2959 } 2960 kfree(entry); 2961 } 2962 } 2963 } 2964 kfree(priv->steer); 2965 } 2966 2967 static int extended_func_num(struct pci_dev *pdev) 2968 { 2969 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 2970 } 2971 2972 #define MLX4_OWNER_BASE 0x8069c 2973 #define MLX4_OWNER_SIZE 4 2974 2975 static int mlx4_get_ownership(struct mlx4_dev *dev) 2976 { 2977 void __iomem *owner; 2978 u32 ret; 2979 2980 if (pci_channel_offline(dev->persist->pdev)) 2981 return -EIO; 2982 2983 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 2984 MLX4_OWNER_BASE, 2985 MLX4_OWNER_SIZE); 2986 if (!owner) { 2987 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2988 return -ENOMEM; 2989 } 2990 2991 ret = readl(owner); 2992 iounmap(owner); 2993 return (int) !!ret; 2994 } 2995 2996 static void mlx4_free_ownership(struct mlx4_dev *dev) 2997 { 2998 void __iomem *owner; 2999 3000 if (pci_channel_offline(dev->persist->pdev)) 3001 return; 3002 3003 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3004 MLX4_OWNER_BASE, 3005 MLX4_OWNER_SIZE); 3006 if (!owner) { 3007 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3008 return; 3009 } 3010 writel(0, owner); 3011 msleep(1000); 3012 iounmap(owner); 3013 } 3014 3015 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 3016 !!((flags) & MLX4_FLAG_MASTER)) 3017 3018 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 3019 u8 total_vfs, int existing_vfs, int reset_flow) 3020 { 3021 u64 dev_flags = dev->flags; 3022 int err = 0; 3023 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), 3024 MLX4_MAX_NUM_VF); 3025 3026 if (reset_flow) { 3027 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 3028 GFP_KERNEL); 3029 if (!dev->dev_vfs) 3030 goto free_mem; 3031 return dev_flags; 3032 } 3033 3034 atomic_inc(&pf_loading); 3035 if (dev->flags & MLX4_FLAG_SRIOV) { 3036 if (existing_vfs != total_vfs) { 3037 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 3038 existing_vfs, total_vfs); 3039 total_vfs = existing_vfs; 3040 } 3041 } 3042 3043 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 3044 if (NULL == dev->dev_vfs) { 3045 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 3046 goto disable_sriov; 3047 } 3048 3049 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 3050 if (total_vfs > fw_enabled_sriov_vfs) { 3051 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n", 3052 total_vfs, fw_enabled_sriov_vfs); 3053 err = -ENOMEM; 3054 goto disable_sriov; 3055 } 3056 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 3057 err = pci_enable_sriov(pdev, total_vfs); 3058 } 3059 if (err) { 3060 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 3061 err); 3062 goto disable_sriov; 3063 } else { 3064 mlx4_warn(dev, "Running in master mode\n"); 3065 dev_flags |= MLX4_FLAG_SRIOV | 3066 MLX4_FLAG_MASTER; 3067 dev_flags &= ~MLX4_FLAG_SLAVE; 3068 dev->persist->num_vfs = total_vfs; 3069 } 3070 return dev_flags; 3071 3072 disable_sriov: 3073 atomic_dec(&pf_loading); 3074 free_mem: 3075 dev->persist->num_vfs = 0; 3076 kfree(dev->dev_vfs); 3077 dev->dev_vfs = NULL; 3078 return dev_flags & ~MLX4_FLAG_MASTER; 3079 } 3080 3081 enum { 3082 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 3083 }; 3084 3085 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 3086 int *nvfs) 3087 { 3088 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 3089 /* Checking for 64 VFs as a limitation of CX2 */ 3090 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 3091 requested_vfs >= 64) { 3092 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 3093 requested_vfs); 3094 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 3095 } 3096 return 0; 3097 } 3098 3099 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3100 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3101 int reset_flow) 3102 { 3103 struct mlx4_dev *dev; 3104 unsigned sum = 0; 3105 int err; 3106 int port; 3107 int i; 3108 struct mlx4_dev_cap *dev_cap = NULL; 3109 int existing_vfs = 0; 3110 3111 dev = &priv->dev; 3112 3113 INIT_LIST_HEAD(&priv->ctx_list); 3114 spin_lock_init(&priv->ctx_lock); 3115 3116 mutex_init(&priv->port_mutex); 3117 mutex_init(&priv->bond_mutex); 3118 3119 INIT_LIST_HEAD(&priv->pgdir_list); 3120 mutex_init(&priv->pgdir_mutex); 3121 3122 INIT_LIST_HEAD(&priv->bf_list); 3123 mutex_init(&priv->bf_mutex); 3124 3125 dev->rev_id = pdev->revision; 3126 dev->numa_node = dev_to_node(&pdev->dev); 3127 3128 /* Detect if this device is a virtual function */ 3129 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3130 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 3131 dev->flags |= MLX4_FLAG_SLAVE; 3132 } else { 3133 /* We reset the device and enable SRIOV only for physical 3134 * devices. Try to claim ownership on the device; 3135 * if already taken, skip -- do not allow multiple PFs */ 3136 err = mlx4_get_ownership(dev); 3137 if (err) { 3138 if (err < 0) 3139 return err; 3140 else { 3141 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 3142 return -EINVAL; 3143 } 3144 } 3145 3146 atomic_set(&priv->opreq_count, 0); 3147 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 3148 3149 /* 3150 * Now reset the HCA before we touch the PCI capabilities or 3151 * attempt a firmware command, since a boot ROM may have left 3152 * the HCA in an undefined state. 3153 */ 3154 err = mlx4_reset(dev); 3155 if (err) { 3156 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 3157 goto err_sriov; 3158 } 3159 3160 if (total_vfs) { 3161 dev->flags = MLX4_FLAG_MASTER; 3162 existing_vfs = pci_num_vf(pdev); 3163 if (existing_vfs) 3164 dev->flags |= MLX4_FLAG_SRIOV; 3165 dev->persist->num_vfs = total_vfs; 3166 } 3167 } 3168 3169 /* on load remove any previous indication of internal error, 3170 * device is up. 3171 */ 3172 dev->persist->state = MLX4_DEVICE_STATE_UP; 3173 3174 slave_start: 3175 err = mlx4_cmd_init(dev); 3176 if (err) { 3177 mlx4_err(dev, "Failed to init command interface, aborting\n"); 3178 goto err_sriov; 3179 } 3180 3181 /* In slave functions, the communication channel must be initialized 3182 * before posting commands. Also, init num_slaves before calling 3183 * mlx4_init_hca */ 3184 if (mlx4_is_mfunc(dev)) { 3185 if (mlx4_is_master(dev)) { 3186 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 3187 3188 } else { 3189 dev->num_slaves = 0; 3190 err = mlx4_multi_func_init(dev); 3191 if (err) { 3192 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 3193 goto err_cmd; 3194 } 3195 } 3196 } 3197 3198 err = mlx4_init_fw(dev); 3199 if (err) { 3200 mlx4_err(dev, "Failed to init fw, aborting.\n"); 3201 goto err_mfunc; 3202 } 3203 3204 if (mlx4_is_master(dev)) { 3205 /* when we hit the goto slave_start below, dev_cap already initialized */ 3206 if (!dev_cap) { 3207 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 3208 3209 if (!dev_cap) { 3210 err = -ENOMEM; 3211 goto err_fw; 3212 } 3213 3214 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3215 if (err) { 3216 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3217 goto err_fw; 3218 } 3219 3220 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3221 goto err_fw; 3222 3223 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3224 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 3225 total_vfs, 3226 existing_vfs, 3227 reset_flow); 3228 3229 mlx4_close_fw(dev); 3230 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3231 dev->flags = dev_flags; 3232 if (!SRIOV_VALID_STATE(dev->flags)) { 3233 mlx4_err(dev, "Invalid SRIOV state\n"); 3234 goto err_sriov; 3235 } 3236 err = mlx4_reset(dev); 3237 if (err) { 3238 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 3239 goto err_sriov; 3240 } 3241 goto slave_start; 3242 } 3243 } else { 3244 /* Legacy mode FW requires SRIOV to be enabled before 3245 * doing QUERY_DEV_CAP, since max_eq's value is different if 3246 * SRIOV is enabled. 3247 */ 3248 memset(dev_cap, 0, sizeof(*dev_cap)); 3249 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3250 if (err) { 3251 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3252 goto err_fw; 3253 } 3254 3255 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3256 goto err_fw; 3257 } 3258 } 3259 3260 err = mlx4_init_hca(dev); 3261 if (err) { 3262 if (err == -EACCES) { 3263 /* Not primary Physical function 3264 * Running in slave mode */ 3265 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3266 /* We're not a PF */ 3267 if (dev->flags & MLX4_FLAG_SRIOV) { 3268 if (!existing_vfs) 3269 pci_disable_sriov(pdev); 3270 if (mlx4_is_master(dev) && !reset_flow) 3271 atomic_dec(&pf_loading); 3272 dev->flags &= ~MLX4_FLAG_SRIOV; 3273 } 3274 if (!mlx4_is_slave(dev)) 3275 mlx4_free_ownership(dev); 3276 dev->flags |= MLX4_FLAG_SLAVE; 3277 dev->flags &= ~MLX4_FLAG_MASTER; 3278 goto slave_start; 3279 } else 3280 goto err_fw; 3281 } 3282 3283 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3284 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 3285 existing_vfs, reset_flow); 3286 3287 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 3288 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 3289 dev->flags = dev_flags; 3290 err = mlx4_cmd_init(dev); 3291 if (err) { 3292 /* Only VHCR is cleaned up, so could still 3293 * send FW commands 3294 */ 3295 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 3296 goto err_close; 3297 } 3298 } else { 3299 dev->flags = dev_flags; 3300 } 3301 3302 if (!SRIOV_VALID_STATE(dev->flags)) { 3303 mlx4_err(dev, "Invalid SRIOV state\n"); 3304 goto err_close; 3305 } 3306 } 3307 3308 /* check if the device is functioning at its maximum possible speed. 3309 * No return code for this call, just warn the user in case of PCI 3310 * express device capabilities are under-satisfied by the bus. 3311 */ 3312 if (!mlx4_is_slave(dev)) 3313 mlx4_check_pcie_caps(dev); 3314 3315 /* In master functions, the communication channel must be initialized 3316 * after obtaining its address from fw */ 3317 if (mlx4_is_master(dev)) { 3318 if (dev->caps.num_ports < 2 && 3319 num_vfs_argc > 1) { 3320 err = -EINVAL; 3321 mlx4_err(dev, 3322 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 3323 dev->caps.num_ports); 3324 goto err_close; 3325 } 3326 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 3327 3328 for (i = 0; 3329 i < sizeof(dev->persist->nvfs)/ 3330 sizeof(dev->persist->nvfs[0]); i++) { 3331 unsigned j; 3332 3333 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 3334 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 3335 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 3336 dev->caps.num_ports; 3337 } 3338 } 3339 3340 /* In master functions, the communication channel 3341 * must be initialized after obtaining its address from fw 3342 */ 3343 err = mlx4_multi_func_init(dev); 3344 if (err) { 3345 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 3346 goto err_close; 3347 } 3348 } 3349 3350 err = mlx4_alloc_eq_table(dev); 3351 if (err) 3352 goto err_master_mfunc; 3353 3354 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); 3355 mutex_init(&priv->msix_ctl.pool_lock); 3356 3357 mlx4_enable_msi_x(dev); 3358 if ((mlx4_is_mfunc(dev)) && 3359 !(dev->flags & MLX4_FLAG_MSI_X)) { 3360 err = -ENOSYS; 3361 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 3362 goto err_free_eq; 3363 } 3364 3365 if (!mlx4_is_slave(dev)) { 3366 err = mlx4_init_steering(dev); 3367 if (err) 3368 goto err_disable_msix; 3369 } 3370 3371 err = mlx4_setup_hca(dev); 3372 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 3373 !mlx4_is_mfunc(dev)) { 3374 dev->flags &= ~MLX4_FLAG_MSI_X; 3375 dev->caps.num_comp_vectors = 1; 3376 pci_disable_msix(pdev); 3377 err = mlx4_setup_hca(dev); 3378 } 3379 3380 if (err) 3381 goto err_steer; 3382 3383 mlx4_init_quotas(dev); 3384 /* When PF resources are ready arm its comm channel to enable 3385 * getting commands 3386 */ 3387 if (mlx4_is_master(dev)) { 3388 err = mlx4_ARM_COMM_CHANNEL(dev); 3389 if (err) { 3390 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3391 err); 3392 goto err_steer; 3393 } 3394 } 3395 3396 for (port = 1; port <= dev->caps.num_ports; port++) { 3397 err = mlx4_init_port_info(dev, port); 3398 if (err) 3399 goto err_port; 3400 } 3401 3402 priv->v2p.port1 = 1; 3403 priv->v2p.port2 = 2; 3404 3405 err = mlx4_register_device(dev); 3406 if (err) 3407 goto err_port; 3408 3409 mlx4_request_modules(dev); 3410 3411 mlx4_sense_init(dev); 3412 mlx4_start_sense(dev); 3413 3414 priv->removed = 0; 3415 3416 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3417 atomic_dec(&pf_loading); 3418 3419 kfree(dev_cap); 3420 return 0; 3421 3422 err_port: 3423 for (--port; port >= 1; --port) 3424 mlx4_cleanup_port_info(&priv->port[port]); 3425 3426 mlx4_cleanup_default_counters(dev); 3427 if (!mlx4_is_slave(dev)) 3428 mlx4_cleanup_counters_table(dev); 3429 mlx4_cleanup_qp_table(dev); 3430 mlx4_cleanup_srq_table(dev); 3431 mlx4_cleanup_cq_table(dev); 3432 mlx4_cmd_use_polling(dev); 3433 mlx4_cleanup_eq_table(dev); 3434 mlx4_cleanup_mcg_table(dev); 3435 mlx4_cleanup_mr_table(dev); 3436 mlx4_cleanup_xrcd_table(dev); 3437 mlx4_cleanup_pd_table(dev); 3438 mlx4_cleanup_uar_table(dev); 3439 3440 err_steer: 3441 if (!mlx4_is_slave(dev)) 3442 mlx4_clear_steering(dev); 3443 3444 err_disable_msix: 3445 if (dev->flags & MLX4_FLAG_MSI_X) 3446 pci_disable_msix(pdev); 3447 3448 err_free_eq: 3449 mlx4_free_eq_table(dev); 3450 3451 err_master_mfunc: 3452 if (mlx4_is_master(dev)) { 3453 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3454 mlx4_multi_func_cleanup(dev); 3455 } 3456 3457 if (mlx4_is_slave(dev)) { 3458 kfree(dev->caps.qp0_qkey); 3459 kfree(dev->caps.qp0_tunnel); 3460 kfree(dev->caps.qp0_proxy); 3461 kfree(dev->caps.qp1_tunnel); 3462 kfree(dev->caps.qp1_proxy); 3463 } 3464 3465 err_close: 3466 mlx4_close_hca(dev); 3467 3468 err_fw: 3469 mlx4_close_fw(dev); 3470 3471 err_mfunc: 3472 if (mlx4_is_slave(dev)) 3473 mlx4_multi_func_cleanup(dev); 3474 3475 err_cmd: 3476 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3477 3478 err_sriov: 3479 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3480 pci_disable_sriov(pdev); 3481 dev->flags &= ~MLX4_FLAG_SRIOV; 3482 } 3483 3484 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3485 atomic_dec(&pf_loading); 3486 3487 kfree(priv->dev.dev_vfs); 3488 3489 if (!mlx4_is_slave(dev)) 3490 mlx4_free_ownership(dev); 3491 3492 kfree(dev_cap); 3493 return err; 3494 } 3495 3496 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3497 struct mlx4_priv *priv) 3498 { 3499 int err; 3500 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3501 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3502 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3503 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3504 unsigned total_vfs = 0; 3505 unsigned int i; 3506 3507 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3508 3509 err = pci_enable_device(pdev); 3510 if (err) { 3511 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3512 return err; 3513 } 3514 3515 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 3516 * per port, we must limit the number of VFs to 63 (since their are 3517 * 128 MACs) 3518 */ 3519 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3520 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3521 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3522 if (nvfs[i] < 0) { 3523 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3524 err = -EINVAL; 3525 goto err_disable_pdev; 3526 } 3527 } 3528 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3529 i++) { 3530 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3531 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3532 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3533 err = -EINVAL; 3534 goto err_disable_pdev; 3535 } 3536 } 3537 if (total_vfs > MLX4_MAX_NUM_VF) { 3538 dev_err(&pdev->dev, 3539 "Requested more VF's (%d) than allowed by hw (%d)\n", 3540 total_vfs, MLX4_MAX_NUM_VF); 3541 err = -EINVAL; 3542 goto err_disable_pdev; 3543 } 3544 3545 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3546 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { 3547 dev_err(&pdev->dev, 3548 "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n", 3549 nvfs[i] + nvfs[2], i + 1, 3550 MLX4_MAX_NUM_VF_P_PORT); 3551 err = -EINVAL; 3552 goto err_disable_pdev; 3553 } 3554 } 3555 3556 /* Check for BARs. */ 3557 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3558 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3559 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3560 pci_dev_data, pci_resource_flags(pdev, 0)); 3561 err = -ENODEV; 3562 goto err_disable_pdev; 3563 } 3564 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3565 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3566 err = -ENODEV; 3567 goto err_disable_pdev; 3568 } 3569 3570 err = pci_request_regions(pdev, DRV_NAME); 3571 if (err) { 3572 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3573 goto err_disable_pdev; 3574 } 3575 3576 pci_set_master(pdev); 3577 3578 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3579 if (err) { 3580 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3581 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3582 if (err) { 3583 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3584 goto err_release_regions; 3585 } 3586 } 3587 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3588 if (err) { 3589 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3590 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3591 if (err) { 3592 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3593 goto err_release_regions; 3594 } 3595 } 3596 3597 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3598 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3599 /* Detect if this device is a virtual function */ 3600 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3601 /* When acting as pf, we normally skip vfs unless explicitly 3602 * requested to probe them. 3603 */ 3604 if (total_vfs) { 3605 unsigned vfs_offset = 0; 3606 3607 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3608 vfs_offset + nvfs[i] < extended_func_num(pdev); 3609 vfs_offset += nvfs[i], i++) 3610 ; 3611 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3612 err = -ENODEV; 3613 goto err_release_regions; 3614 } 3615 if ((extended_func_num(pdev) - vfs_offset) 3616 > prb_vf[i]) { 3617 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3618 extended_func_num(pdev)); 3619 err = -ENODEV; 3620 goto err_release_regions; 3621 } 3622 } 3623 } 3624 3625 err = mlx4_catas_init(&priv->dev); 3626 if (err) 3627 goto err_release_regions; 3628 3629 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3630 if (err) 3631 goto err_catas; 3632 3633 return 0; 3634 3635 err_catas: 3636 mlx4_catas_end(&priv->dev); 3637 3638 err_release_regions: 3639 pci_release_regions(pdev); 3640 3641 err_disable_pdev: 3642 pci_disable_device(pdev); 3643 pci_set_drvdata(pdev, NULL); 3644 return err; 3645 } 3646 3647 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3648 { 3649 struct mlx4_priv *priv; 3650 struct mlx4_dev *dev; 3651 int ret; 3652 3653 printk_once(KERN_INFO "%s", mlx4_version); 3654 3655 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3656 if (!priv) 3657 return -ENOMEM; 3658 3659 dev = &priv->dev; 3660 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3661 if (!dev->persist) { 3662 kfree(priv); 3663 return -ENOMEM; 3664 } 3665 dev->persist->pdev = pdev; 3666 dev->persist->dev = dev; 3667 pci_set_drvdata(pdev, dev->persist); 3668 priv->pci_dev_data = id->driver_data; 3669 mutex_init(&dev->persist->device_state_mutex); 3670 mutex_init(&dev->persist->interface_state_mutex); 3671 3672 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3673 if (ret) { 3674 kfree(dev->persist); 3675 kfree(priv); 3676 } else { 3677 pci_save_state(pdev); 3678 } 3679 3680 return ret; 3681 } 3682 3683 static void mlx4_clean_dev(struct mlx4_dev *dev) 3684 { 3685 struct mlx4_dev_persistent *persist = dev->persist; 3686 struct mlx4_priv *priv = mlx4_priv(dev); 3687 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3688 3689 memset(priv, 0, sizeof(*priv)); 3690 priv->dev.persist = persist; 3691 priv->dev.flags = flags; 3692 } 3693 3694 static void mlx4_unload_one(struct pci_dev *pdev) 3695 { 3696 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3697 struct mlx4_dev *dev = persist->dev; 3698 struct mlx4_priv *priv = mlx4_priv(dev); 3699 int pci_dev_data; 3700 int p, i; 3701 3702 if (priv->removed) 3703 return; 3704 3705 /* saving current ports type for further use */ 3706 for (i = 0; i < dev->caps.num_ports; i++) { 3707 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3708 dev->persist->curr_port_poss_type[i] = dev->caps. 3709 possible_type[i + 1]; 3710 } 3711 3712 pci_dev_data = priv->pci_dev_data; 3713 3714 mlx4_stop_sense(dev); 3715 mlx4_unregister_device(dev); 3716 3717 for (p = 1; p <= dev->caps.num_ports; p++) { 3718 mlx4_cleanup_port_info(&priv->port[p]); 3719 mlx4_CLOSE_PORT(dev, p); 3720 } 3721 3722 if (mlx4_is_master(dev)) 3723 mlx4_free_resource_tracker(dev, 3724 RES_TR_FREE_SLAVES_ONLY); 3725 3726 mlx4_cleanup_default_counters(dev); 3727 if (!mlx4_is_slave(dev)) 3728 mlx4_cleanup_counters_table(dev); 3729 mlx4_cleanup_qp_table(dev); 3730 mlx4_cleanup_srq_table(dev); 3731 mlx4_cleanup_cq_table(dev); 3732 mlx4_cmd_use_polling(dev); 3733 mlx4_cleanup_eq_table(dev); 3734 mlx4_cleanup_mcg_table(dev); 3735 mlx4_cleanup_mr_table(dev); 3736 mlx4_cleanup_xrcd_table(dev); 3737 mlx4_cleanup_pd_table(dev); 3738 3739 if (mlx4_is_master(dev)) 3740 mlx4_free_resource_tracker(dev, 3741 RES_TR_FREE_STRUCTS_ONLY); 3742 3743 iounmap(priv->kar); 3744 mlx4_uar_free(dev, &priv->driver_uar); 3745 mlx4_cleanup_uar_table(dev); 3746 if (!mlx4_is_slave(dev)) 3747 mlx4_clear_steering(dev); 3748 mlx4_free_eq_table(dev); 3749 if (mlx4_is_master(dev)) 3750 mlx4_multi_func_cleanup(dev); 3751 mlx4_close_hca(dev); 3752 mlx4_close_fw(dev); 3753 if (mlx4_is_slave(dev)) 3754 mlx4_multi_func_cleanup(dev); 3755 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3756 3757 if (dev->flags & MLX4_FLAG_MSI_X) 3758 pci_disable_msix(pdev); 3759 3760 if (!mlx4_is_slave(dev)) 3761 mlx4_free_ownership(dev); 3762 3763 kfree(dev->caps.qp0_qkey); 3764 kfree(dev->caps.qp0_tunnel); 3765 kfree(dev->caps.qp0_proxy); 3766 kfree(dev->caps.qp1_tunnel); 3767 kfree(dev->caps.qp1_proxy); 3768 kfree(dev->dev_vfs); 3769 3770 mlx4_clean_dev(dev); 3771 priv->pci_dev_data = pci_dev_data; 3772 priv->removed = 1; 3773 } 3774 3775 static void mlx4_remove_one(struct pci_dev *pdev) 3776 { 3777 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3778 struct mlx4_dev *dev = persist->dev; 3779 struct mlx4_priv *priv = mlx4_priv(dev); 3780 int active_vfs = 0; 3781 3782 mutex_lock(&persist->interface_state_mutex); 3783 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3784 mutex_unlock(&persist->interface_state_mutex); 3785 3786 /* Disabling SR-IOV is not allowed while there are active vf's */ 3787 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3788 active_vfs = mlx4_how_many_lives_vf(dev); 3789 if (active_vfs) { 3790 pr_warn("Removing PF when there are active VF's !!\n"); 3791 pr_warn("Will not disable SR-IOV.\n"); 3792 } 3793 } 3794 3795 /* device marked to be under deletion running now without the lock 3796 * letting other tasks to be terminated 3797 */ 3798 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3799 mlx4_unload_one(pdev); 3800 else 3801 mlx4_info(dev, "%s: interface is down\n", __func__); 3802 mlx4_catas_end(dev); 3803 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 3804 mlx4_warn(dev, "Disabling SR-IOV\n"); 3805 pci_disable_sriov(pdev); 3806 } 3807 3808 pci_release_regions(pdev); 3809 pci_disable_device(pdev); 3810 kfree(dev->persist); 3811 kfree(priv); 3812 pci_set_drvdata(pdev, NULL); 3813 } 3814 3815 static int restore_current_port_types(struct mlx4_dev *dev, 3816 enum mlx4_port_type *types, 3817 enum mlx4_port_type *poss_types) 3818 { 3819 struct mlx4_priv *priv = mlx4_priv(dev); 3820 int err, i; 3821 3822 mlx4_stop_sense(dev); 3823 3824 mutex_lock(&priv->port_mutex); 3825 for (i = 0; i < dev->caps.num_ports; i++) 3826 dev->caps.possible_type[i + 1] = poss_types[i]; 3827 err = mlx4_change_port_types(dev, types); 3828 mlx4_start_sense(dev); 3829 mutex_unlock(&priv->port_mutex); 3830 3831 return err; 3832 } 3833 3834 int mlx4_restart_one(struct pci_dev *pdev) 3835 { 3836 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3837 struct mlx4_dev *dev = persist->dev; 3838 struct mlx4_priv *priv = mlx4_priv(dev); 3839 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3840 int pci_dev_data, err, total_vfs; 3841 3842 pci_dev_data = priv->pci_dev_data; 3843 total_vfs = dev->persist->num_vfs; 3844 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3845 3846 mlx4_unload_one(pdev); 3847 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); 3848 if (err) { 3849 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3850 __func__, pci_name(pdev), err); 3851 return err; 3852 } 3853 3854 err = restore_current_port_types(dev, dev->persist->curr_port_type, 3855 dev->persist->curr_port_poss_type); 3856 if (err) 3857 mlx4_err(dev, "could not restore original port types (%d)\n", 3858 err); 3859 3860 return err; 3861 } 3862 3863 static const struct pci_device_id mlx4_pci_table[] = { 3864 /* MT25408 "Hermon" SDR */ 3865 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3866 /* MT25408 "Hermon" DDR */ 3867 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3868 /* MT25408 "Hermon" QDR */ 3869 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3870 /* MT25408 "Hermon" DDR PCIe gen2 */ 3871 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3872 /* MT25408 "Hermon" QDR PCIe gen2 */ 3873 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3874 /* MT25408 "Hermon" EN 10GigE */ 3875 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3876 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 3877 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3878 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 3879 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3880 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 3881 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3882 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 3883 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3884 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 3885 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3886 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 3887 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, 3888 /* MT25400 Family [ConnectX-2 Virtual Function] */ 3889 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, 3890 /* MT27500 Family [ConnectX-3] */ 3891 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 3892 /* MT27500 Family [ConnectX-3 Virtual Function] */ 3893 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, 3894 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 3895 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 3896 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 3897 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 3898 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 3899 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 3900 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 3901 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 3902 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 3903 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 3904 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 3905 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 3906 { 0, } 3907 }; 3908 3909 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 3910 3911 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3912 pci_channel_state_t state) 3913 { 3914 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3915 3916 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 3917 mlx4_enter_error_state(persist); 3918 3919 mutex_lock(&persist->interface_state_mutex); 3920 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3921 mlx4_unload_one(pdev); 3922 3923 mutex_unlock(&persist->interface_state_mutex); 3924 if (state == pci_channel_io_perm_failure) 3925 return PCI_ERS_RESULT_DISCONNECT; 3926 3927 pci_disable_device(pdev); 3928 return PCI_ERS_RESULT_NEED_RESET; 3929 } 3930 3931 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3932 { 3933 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3934 struct mlx4_dev *dev = persist->dev; 3935 struct mlx4_priv *priv = mlx4_priv(dev); 3936 int ret; 3937 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3938 int total_vfs; 3939 3940 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 3941 ret = pci_enable_device(pdev); 3942 if (ret) { 3943 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 3944 return PCI_ERS_RESULT_DISCONNECT; 3945 } 3946 3947 pci_set_master(pdev); 3948 pci_restore_state(pdev); 3949 pci_save_state(pdev); 3950 3951 total_vfs = dev->persist->num_vfs; 3952 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3953 3954 mutex_lock(&persist->interface_state_mutex); 3955 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 3956 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 3957 priv, 1); 3958 if (ret) { 3959 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 3960 __func__, ret); 3961 goto end; 3962 } 3963 3964 ret = restore_current_port_types(dev, dev->persist-> 3965 curr_port_type, dev->persist-> 3966 curr_port_poss_type); 3967 if (ret) 3968 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 3969 } 3970 end: 3971 mutex_unlock(&persist->interface_state_mutex); 3972 3973 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3974 } 3975 3976 static void mlx4_shutdown(struct pci_dev *pdev) 3977 { 3978 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3979 3980 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 3981 mutex_lock(&persist->interface_state_mutex); 3982 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3983 mlx4_unload_one(pdev); 3984 mutex_unlock(&persist->interface_state_mutex); 3985 } 3986 3987 static const struct pci_error_handlers mlx4_err_handler = { 3988 .error_detected = mlx4_pci_err_detected, 3989 .slot_reset = mlx4_pci_slot_reset, 3990 }; 3991 3992 static struct pci_driver mlx4_driver = { 3993 .name = DRV_NAME, 3994 .id_table = mlx4_pci_table, 3995 .probe = mlx4_init_one, 3996 .shutdown = mlx4_shutdown, 3997 .remove = mlx4_remove_one, 3998 .err_handler = &mlx4_err_handler, 3999 }; 4000 4001 static int __init mlx4_verify_params(void) 4002 { 4003 if ((log_num_mac < 0) || (log_num_mac > 7)) { 4004 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 4005 return -1; 4006 } 4007 4008 if (log_num_vlan != 0) 4009 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 4010 MLX4_LOG_NUM_VLANS); 4011 4012 if (use_prio != 0) 4013 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 4014 4015 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 4016 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 4017 log_mtts_per_seg); 4018 return -1; 4019 } 4020 4021 /* Check if module param for ports type has legal combination */ 4022 if (port_type_array[0] == false && port_type_array[1] == true) { 4023 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 4024 port_type_array[0] = true; 4025 } 4026 4027 if (mlx4_log_num_mgm_entry_size < -7 || 4028 (mlx4_log_num_mgm_entry_size > 0 && 4029 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 4030 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 4031 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 4032 mlx4_log_num_mgm_entry_size, 4033 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 4034 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 4035 return -1; 4036 } 4037 4038 return 0; 4039 } 4040 4041 static int __init mlx4_init(void) 4042 { 4043 int ret; 4044 4045 if (mlx4_verify_params()) 4046 return -EINVAL; 4047 4048 4049 mlx4_wq = create_singlethread_workqueue("mlx4"); 4050 if (!mlx4_wq) 4051 return -ENOMEM; 4052 4053 ret = pci_register_driver(&mlx4_driver); 4054 if (ret < 0) 4055 destroy_workqueue(mlx4_wq); 4056 return ret < 0 ? ret : 0; 4057 } 4058 4059 static void __exit mlx4_cleanup(void) 4060 { 4061 pci_unregister_driver(&mlx4_driver); 4062 destroy_workqueue(mlx4_wq); 4063 } 4064 4065 module_init(mlx4_init); 4066 module_exit(mlx4_cleanup); 4067