1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 #include <linux/kmod.h> 45 #include <linux/etherdevice.h> 46 #include <net/devlink.h> 47 48 #include <linux/mlx4/device.h> 49 #include <linux/mlx4/doorbell.h> 50 51 #include "mlx4.h" 52 #include "fw.h" 53 #include "icm.h" 54 55 MODULE_AUTHOR("Roland Dreier"); 56 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 57 MODULE_LICENSE("Dual BSD/GPL"); 58 MODULE_VERSION(DRV_VERSION); 59 60 struct workqueue_struct *mlx4_wq; 61 62 #ifdef CONFIG_MLX4_DEBUG 63 64 int mlx4_debug_level = 0; 65 module_param_named(debug_level, mlx4_debug_level, int, 0644); 66 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 67 68 #endif /* CONFIG_MLX4_DEBUG */ 69 70 #ifdef CONFIG_PCI_MSI 71 72 static int msi_x = 1; 73 module_param(msi_x, int, 0444); 74 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 75 76 #else /* CONFIG_PCI_MSI */ 77 78 #define msi_x (0) 79 80 #endif /* CONFIG_PCI_MSI */ 81 82 static uint8_t num_vfs[3] = {0, 0, 0}; 83 static int num_vfs_argc; 84 module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 85 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 86 "num_vfs=port1,port2,port1+2"); 87 88 static uint8_t probe_vf[3] = {0, 0, 0}; 89 static int probe_vfs_argc; 90 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 91 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 92 "probe_vf=port1,port2,port1+2"); 93 94 static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 95 module_param_named(log_num_mgm_entry_size, 96 mlx4_log_num_mgm_entry_size, int, 0444); 97 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 98 " of qp per mcg, for example:" 99 " 10 gives 248.range: 7 <=" 100 " log_num_mgm_entry_size <= 12." 101 " To activate device managed" 102 " flow steering when available, set to -1"); 103 104 static bool enable_64b_cqe_eqe = true; 105 module_param(enable_64b_cqe_eqe, bool, 0444); 106 MODULE_PARM_DESC(enable_64b_cqe_eqe, 107 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 108 109 static bool enable_4k_uar; 110 module_param(enable_4k_uar, bool, 0444); 111 MODULE_PARM_DESC(enable_4k_uar, 112 "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)"); 113 114 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 115 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 116 MLX4_FUNC_CAP_DMFS_A0_STATIC) 117 118 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) 119 120 static char mlx4_version[] = 121 DRV_NAME ": Mellanox ConnectX core driver v" 122 DRV_VERSION "\n"; 123 124 static const struct mlx4_profile default_profile = { 125 .num_qp = 1 << 18, 126 .num_srq = 1 << 16, 127 .rdmarc_per_qp = 1 << 4, 128 .num_cq = 1 << 16, 129 .num_mcg = 1 << 13, 130 .num_mpt = 1 << 19, 131 .num_mtt = 1 << 20, /* It is really num mtt segements */ 132 }; 133 134 static const struct mlx4_profile low_mem_profile = { 135 .num_qp = 1 << 17, 136 .num_srq = 1 << 6, 137 .rdmarc_per_qp = 1 << 4, 138 .num_cq = 1 << 8, 139 .num_mcg = 1 << 8, 140 .num_mpt = 1 << 9, 141 .num_mtt = 1 << 7, 142 }; 143 144 static int log_num_mac = 7; 145 module_param_named(log_num_mac, log_num_mac, int, 0444); 146 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 147 148 static int log_num_vlan; 149 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 150 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 151 /* Log2 max number of VLANs per ETH port (0-7) */ 152 #define MLX4_LOG_NUM_VLANS 7 153 #define MLX4_MIN_LOG_NUM_VLANS 0 154 #define MLX4_MIN_LOG_NUM_MAC 1 155 156 static bool use_prio; 157 module_param_named(use_prio, use_prio, bool, 0444); 158 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); 159 160 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 161 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 162 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 163 164 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 165 static int arr_argc = 2; 166 module_param_array(port_type_array, int, &arr_argc, 0444); 167 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 168 "1 for IB, 2 for Ethernet"); 169 170 struct mlx4_port_config { 171 struct list_head list; 172 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 173 struct pci_dev *pdev; 174 }; 175 176 static atomic_t pf_loading = ATOMIC_INIT(0); 177 178 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, 179 struct mlx4_dev_cap *dev_cap) 180 { 181 /* The reserved_uars is calculated by system page size unit. 182 * Therefore, adjustment is added when the uar page size is less 183 * than the system page size 184 */ 185 dev->caps.reserved_uars = 186 max_t(int, 187 mlx4_get_num_reserved_uar(dev), 188 dev_cap->reserved_uars / 189 (1 << (PAGE_SHIFT - dev->uar_page_shift))); 190 } 191 192 int mlx4_check_port_params(struct mlx4_dev *dev, 193 enum mlx4_port_type *port_type) 194 { 195 int i; 196 197 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 198 for (i = 0; i < dev->caps.num_ports - 1; i++) { 199 if (port_type[i] != port_type[i + 1]) { 200 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 201 return -EINVAL; 202 } 203 } 204 } 205 206 for (i = 0; i < dev->caps.num_ports; i++) { 207 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 208 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 209 i + 1); 210 return -EINVAL; 211 } 212 } 213 return 0; 214 } 215 216 static void mlx4_set_port_mask(struct mlx4_dev *dev) 217 { 218 int i; 219 220 for (i = 1; i <= dev->caps.num_ports; ++i) 221 dev->caps.port_mask[i] = dev->caps.port_type[i]; 222 } 223 224 enum { 225 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 226 }; 227 228 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 229 { 230 int err = 0; 231 struct mlx4_func func; 232 233 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 234 err = mlx4_QUERY_FUNC(dev, &func, 0); 235 if (err) { 236 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 237 return err; 238 } 239 dev_cap->max_eqs = func.max_eq; 240 dev_cap->reserved_eqs = func.rsvd_eqs; 241 dev_cap->reserved_uars = func.rsvd_uars; 242 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 243 } 244 return err; 245 } 246 247 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 248 { 249 struct mlx4_caps *dev_cap = &dev->caps; 250 251 /* FW not supporting or cancelled by user */ 252 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || 253 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) 254 return; 255 256 /* Must have 64B CQE_EQE enabled by FW to use bigger stride 257 * When FW has NCSI it may decide not to report 64B CQE/EQEs 258 */ 259 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || 260 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { 261 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 262 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 263 return; 264 } 265 266 if (cache_line_size() == 128 || cache_line_size() == 256) { 267 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); 268 /* Changing the real data inside CQE size to 32B */ 269 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 270 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 271 272 if (mlx4_is_master(dev)) 273 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 274 } else { 275 if (cache_line_size() != 32 && cache_line_size() != 64) 276 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); 277 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 278 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 279 } 280 } 281 282 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, 283 struct mlx4_port_cap *port_cap) 284 { 285 dev->caps.vl_cap[port] = port_cap->max_vl; 286 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; 287 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; 288 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; 289 /* set gid and pkey table operating lengths by default 290 * to non-sriov values 291 */ 292 dev->caps.gid_table_len[port] = port_cap->max_gids; 293 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 294 dev->caps.port_width_cap[port] = port_cap->max_port_width; 295 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 296 dev->caps.max_tc_eth = port_cap->max_tc_eth; 297 dev->caps.def_mac[port] = port_cap->def_mac; 298 dev->caps.supported_type[port] = port_cap->supported_port_types; 299 dev->caps.suggested_type[port] = port_cap->suggested_type; 300 dev->caps.default_sense[port] = port_cap->default_sense; 301 dev->caps.trans_type[port] = port_cap->trans_type; 302 dev->caps.vendor_oui[port] = port_cap->vendor_oui; 303 dev->caps.wavelength[port] = port_cap->wavelength; 304 dev->caps.trans_code[port] = port_cap->trans_code; 305 306 return 0; 307 } 308 309 static int mlx4_dev_port(struct mlx4_dev *dev, int port, 310 struct mlx4_port_cap *port_cap) 311 { 312 int err = 0; 313 314 err = mlx4_QUERY_PORT(dev, port, port_cap); 315 316 if (err) 317 mlx4_err(dev, "QUERY_PORT command failed.\n"); 318 319 return err; 320 } 321 322 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) 323 { 324 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) 325 return; 326 327 if (mlx4_is_mfunc(dev)) { 328 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS"); 329 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 330 return; 331 } 332 333 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { 334 mlx4_dbg(dev, 335 "Keep FCS is not supported - Disabling Ignore FCS"); 336 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 337 return; 338 } 339 } 340 341 #define MLX4_A0_STEERING_TABLE_SIZE 256 342 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 343 { 344 int err; 345 int i; 346 347 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 348 if (err) { 349 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 350 return err; 351 } 352 mlx4_dev_cap_dump(dev, dev_cap); 353 354 if (dev_cap->min_page_sz > PAGE_SIZE) { 355 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 356 dev_cap->min_page_sz, PAGE_SIZE); 357 return -ENODEV; 358 } 359 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 360 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 361 dev_cap->num_ports, MLX4_MAX_PORTS); 362 return -ENODEV; 363 } 364 365 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { 366 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 367 dev_cap->uar_size, 368 (unsigned long long) 369 pci_resource_len(dev->persist->pdev, 2)); 370 return -ENODEV; 371 } 372 373 dev->caps.num_ports = dev_cap->num_ports; 374 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 375 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 376 dev->caps.num_sys_eqs : 377 MLX4_MAX_EQ_NUM; 378 for (i = 1; i <= dev->caps.num_ports; ++i) { 379 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); 380 if (err) { 381 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); 382 return err; 383 } 384 } 385 386 dev->caps.uar_page_size = PAGE_SIZE; 387 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 388 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 389 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 390 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 391 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 392 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 393 dev->caps.max_wqes = dev_cap->max_qp_sz; 394 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 395 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 396 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 397 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 398 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 399 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 400 /* 401 * Subtract 1 from the limit because we need to allocate a 402 * spare CQE so the HCA HW can tell the difference between an 403 * empty CQ and a full CQ. 404 */ 405 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 406 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 407 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 408 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 409 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 410 411 dev->caps.reserved_pds = dev_cap->reserved_pds; 412 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 413 dev_cap->reserved_xrcds : 0; 414 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 415 dev_cap->max_xrcds : 0; 416 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 417 418 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 419 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 420 dev->caps.flags = dev_cap->flags; 421 dev->caps.flags2 = dev_cap->flags2; 422 dev->caps.bmme_flags = dev_cap->bmme_flags; 423 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 424 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 425 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 427 dev->caps.wol_port[1] = dev_cap->wol_port[1]; 428 dev->caps.wol_port[2] = dev_cap->wol_port[2]; 429 430 /* Save uar page shift */ 431 if (!mlx4_is_slave(dev)) { 432 /* Virtual PCI function needs to determine UAR page size from 433 * firmware. Only master PCI function can set the uar page size 434 */ 435 if (enable_4k_uar || !dev->persist->num_vfs) 436 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; 437 else 438 dev->uar_page_shift = PAGE_SHIFT; 439 440 mlx4_set_num_reserved_uars(dev, dev_cap); 441 } 442 443 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 444 struct mlx4_init_hca_param hca_param; 445 446 memset(&hca_param, 0, sizeof(hca_param)); 447 err = mlx4_QUERY_HCA(dev, &hca_param); 448 /* Turn off PHV_EN flag in case phv_check_en is set. 449 * phv_check_en is a HW check that parse the packet and verify 450 * phv bit was reported correctly in the wqe. To allow QinQ 451 * PHV_EN flag should be set and phv_check_en must be cleared 452 * otherwise QinQ packets will be drop by the HW. 453 */ 454 if (err || hca_param.phv_check_en) 455 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; 456 } 457 458 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 459 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 460 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 461 /* Don't do sense port on multifunction devices (for now at least) */ 462 if (mlx4_is_mfunc(dev)) 463 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 464 465 if (mlx4_low_memory_profile()) { 466 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 467 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 468 } else { 469 dev->caps.log_num_macs = log_num_mac; 470 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 471 } 472 473 for (i = 1; i <= dev->caps.num_ports; ++i) { 474 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 475 if (dev->caps.supported_type[i]) { 476 /* if only ETH is supported - assign ETH */ 477 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 478 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 479 /* if only IB is supported, assign IB */ 480 else if (dev->caps.supported_type[i] == 481 MLX4_PORT_TYPE_IB) 482 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 483 else { 484 /* if IB and ETH are supported, we set the port 485 * type according to user selection of port type; 486 * if user selected none, take the FW hint */ 487 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 488 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 489 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 490 else 491 dev->caps.port_type[i] = port_type_array[i - 1]; 492 } 493 } 494 /* 495 * Link sensing is allowed on the port if 3 conditions are true: 496 * 1. Both protocols are supported on the port. 497 * 2. Different types are supported on the port 498 * 3. FW declared that it supports link sensing 499 */ 500 mlx4_priv(dev)->sense.sense_allowed[i] = 501 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 502 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 503 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 504 505 /* 506 * If "default_sense" bit is set, we move the port to "AUTO" mode 507 * and perform sense_port FW command to try and set the correct 508 * port type from beginning 509 */ 510 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 511 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 512 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 513 mlx4_SENSE_PORT(dev, i, &sensed_port); 514 if (sensed_port != MLX4_PORT_TYPE_NONE) 515 dev->caps.port_type[i] = sensed_port; 516 } else { 517 dev->caps.possible_type[i] = dev->caps.port_type[i]; 518 } 519 520 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 521 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 522 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 523 i, 1 << dev->caps.log_num_macs); 524 } 525 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 526 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 527 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 528 i, 1 << dev->caps.log_num_vlans); 529 } 530 } 531 532 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) && 533 (port_type_array[0] == MLX4_PORT_TYPE_IB) && 534 (port_type_array[1] == MLX4_PORT_TYPE_ETH)) { 535 mlx4_warn(dev, 536 "Granular QoS per VF not supported with IB/Eth configuration\n"); 537 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP; 538 } 539 540 dev->caps.max_counters = dev_cap->max_counters; 541 542 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 543 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 544 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 545 (1 << dev->caps.log_num_macs) * 546 (1 << dev->caps.log_num_vlans) * 547 dev->caps.num_ports; 548 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 549 550 if (dev_cap->dmfs_high_rate_qpn_base > 0 && 551 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) 552 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; 553 else 554 dev->caps.dmfs_high_rate_qpn_base = 555 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 556 557 if (dev_cap->dmfs_high_rate_qpn_range > 0 && 558 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 559 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; 560 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; 561 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; 562 } else { 563 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; 564 dev->caps.dmfs_high_rate_qpn_base = 565 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 566 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; 567 } 568 569 dev->caps.rl_caps = dev_cap->rl_caps; 570 571 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = 572 dev->caps.dmfs_high_rate_qpn_range; 573 574 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 575 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 576 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 577 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 578 579 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; 580 581 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { 582 if (dev_cap->flags & 583 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 584 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 585 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 586 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 587 } 588 589 if (dev_cap->flags2 & 590 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | 591 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { 592 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); 593 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 594 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 595 } 596 } 597 598 if ((dev->caps.flags & 599 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 600 mlx4_is_master(dev)) 601 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 602 603 if (!mlx4_is_slave(dev)) { 604 mlx4_enable_cqe_eqe_stride(dev); 605 dev->caps.alloc_res_qp_mask = 606 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 607 MLX4_RESERVE_A0_QP; 608 609 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && 610 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { 611 mlx4_warn(dev, "Old device ETS support detected\n"); 612 mlx4_warn(dev, "Consider upgrading device FW.\n"); 613 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; 614 } 615 616 } else { 617 dev->caps.alloc_res_qp_mask = 0; 618 } 619 620 mlx4_enable_ignore_fcs(dev); 621 622 return 0; 623 } 624 625 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, 626 enum pci_bus_speed *speed, 627 enum pcie_link_width *width) 628 { 629 u32 lnkcap1, lnkcap2; 630 int err1, err2; 631 632 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 633 634 *speed = PCI_SPEED_UNKNOWN; 635 *width = PCIE_LNK_WIDTH_UNKNOWN; 636 637 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, 638 &lnkcap1); 639 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, 640 &lnkcap2); 641 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 642 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 643 *speed = PCIE_SPEED_8_0GT; 644 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 645 *speed = PCIE_SPEED_5_0GT; 646 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 647 *speed = PCIE_SPEED_2_5GT; 648 } 649 if (!err1) { 650 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 651 if (!lnkcap2) { /* pre-r3.0 */ 652 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 653 *speed = PCIE_SPEED_5_0GT; 654 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 655 *speed = PCIE_SPEED_2_5GT; 656 } 657 } 658 659 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { 660 return err1 ? err1 : 661 err2 ? err2 : -EINVAL; 662 } 663 return 0; 664 } 665 666 static void mlx4_check_pcie_caps(struct mlx4_dev *dev) 667 { 668 enum pcie_link_width width, width_cap; 669 enum pci_bus_speed speed, speed_cap; 670 int err; 671 672 #define PCIE_SPEED_STR(speed) \ 673 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 674 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 675 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ 676 "Unknown") 677 678 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 679 if (err) { 680 mlx4_warn(dev, 681 "Unable to determine PCIe device BW capabilities\n"); 682 return; 683 } 684 685 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 686 if (err || speed == PCI_SPEED_UNKNOWN || 687 width == PCIE_LNK_WIDTH_UNKNOWN) { 688 mlx4_warn(dev, 689 "Unable to determine PCI device chain minimum BW\n"); 690 return; 691 } 692 693 if (width != width_cap || speed != speed_cap) 694 mlx4_warn(dev, 695 "PCIe BW is different than device's capability\n"); 696 697 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 698 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 699 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 700 width, width_cap); 701 return; 702 } 703 704 /*The function checks if there are live vf, return the num of them*/ 705 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 706 { 707 struct mlx4_priv *priv = mlx4_priv(dev); 708 struct mlx4_slave_state *s_state; 709 int i; 710 int ret = 0; 711 712 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 713 s_state = &priv->mfunc.master.slave_state[i]; 714 if (s_state->active && s_state->last_cmd != 715 MLX4_COMM_CMD_RESET) { 716 mlx4_warn(dev, "%s: slave: %d is still active\n", 717 __func__, i); 718 ret++; 719 } 720 } 721 return ret; 722 } 723 724 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 725 { 726 u32 qk = MLX4_RESERVED_QKEY_BASE; 727 728 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 729 qpn < dev->phys_caps.base_proxy_sqpn) 730 return -EINVAL; 731 732 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 733 /* tunnel qp */ 734 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 735 else 736 qk += qpn - dev->phys_caps.base_proxy_sqpn; 737 *qkey = qk; 738 return 0; 739 } 740 EXPORT_SYMBOL(mlx4_get_parav_qkey); 741 742 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 743 { 744 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 745 746 if (!mlx4_is_master(dev)) 747 return; 748 749 priv->virt2phys_pkey[slave][port - 1][i] = val; 750 } 751 EXPORT_SYMBOL(mlx4_sync_pkey_table); 752 753 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 754 { 755 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 756 757 if (!mlx4_is_master(dev)) 758 return; 759 760 priv->slave_node_guids[slave] = guid; 761 } 762 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 763 764 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 765 { 766 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 767 768 if (!mlx4_is_master(dev)) 769 return 0; 770 771 return priv->slave_node_guids[slave]; 772 } 773 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 774 775 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 776 { 777 struct mlx4_priv *priv = mlx4_priv(dev); 778 struct mlx4_slave_state *s_slave; 779 780 if (!mlx4_is_master(dev)) 781 return 0; 782 783 s_slave = &priv->mfunc.master.slave_state[slave]; 784 return !!s_slave->active; 785 } 786 EXPORT_SYMBOL(mlx4_is_slave_active); 787 788 void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, 789 struct _rule_hw *eth_header) 790 { 791 if (is_multicast_ether_addr(eth_header->eth.dst_mac) || 792 is_broadcast_ether_addr(eth_header->eth.dst_mac)) { 793 struct mlx4_net_trans_rule_hw_eth *eth = 794 (struct mlx4_net_trans_rule_hw_eth *)eth_header; 795 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1); 796 bool last_rule = next_rule->size == 0 && next_rule->id == 0 && 797 next_rule->rsvd == 0; 798 799 if (last_rule) 800 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC); 801 } 802 } 803 EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio); 804 805 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 806 struct mlx4_dev_cap *dev_cap, 807 struct mlx4_init_hca_param *hca_param) 808 { 809 dev->caps.steering_mode = hca_param->steering_mode; 810 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 811 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 812 dev->caps.fs_log_max_ucast_qp_range_size = 813 dev_cap->fs_log_max_ucast_qp_range_size; 814 } else 815 dev->caps.num_qp_per_mgm = 816 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 817 818 mlx4_dbg(dev, "Steering mode is: %s\n", 819 mlx4_steering_mode_str(dev->caps.steering_mode)); 820 } 821 822 static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev) 823 { 824 kfree(dev->caps.spec_qps); 825 dev->caps.spec_qps = NULL; 826 } 827 828 static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev) 829 { 830 struct mlx4_func_cap *func_cap = NULL; 831 struct mlx4_caps *caps = &dev->caps; 832 int i, err = 0; 833 834 func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL); 835 caps->spec_qps = kcalloc(caps->num_ports, sizeof(*caps->spec_qps), GFP_KERNEL); 836 837 if (!func_cap || !caps->spec_qps) { 838 mlx4_err(dev, "Failed to allocate memory for special qps cap\n"); 839 err = -ENOMEM; 840 goto err_mem; 841 } 842 843 for (i = 1; i <= caps->num_ports; ++i) { 844 err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap); 845 if (err) { 846 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 847 i, err); 848 goto err_mem; 849 } 850 caps->spec_qps[i - 1] = func_cap->spec_qps; 851 caps->port_mask[i] = caps->port_type[i]; 852 caps->phys_port_id[i] = func_cap->phys_port_id; 853 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, 854 &caps->gid_table_len[i], 855 &caps->pkey_table_len[i]); 856 if (err) { 857 mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n", 858 i, err); 859 goto err_mem; 860 } 861 } 862 863 err_mem: 864 if (err) 865 mlx4_slave_destroy_special_qp_cap(dev); 866 kfree(func_cap); 867 return err; 868 } 869 870 static int mlx4_slave_cap(struct mlx4_dev *dev) 871 { 872 int err; 873 u32 page_size; 874 struct mlx4_dev_cap *dev_cap = NULL; 875 struct mlx4_func_cap *func_cap = NULL; 876 struct mlx4_init_hca_param *hca_param = NULL; 877 878 hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL); 879 func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL); 880 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 881 if (!hca_param || !func_cap || !dev_cap) { 882 mlx4_err(dev, "Failed to allocate memory for slave_cap\n"); 883 err = -ENOMEM; 884 goto free_mem; 885 } 886 887 err = mlx4_QUERY_HCA(dev, hca_param); 888 if (err) { 889 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); 890 goto free_mem; 891 } 892 893 /* fail if the hca has an unknown global capability 894 * at this time global_caps should be always zeroed 895 */ 896 if (hca_param->global_caps) { 897 mlx4_err(dev, "Unknown hca global capabilities\n"); 898 err = -EINVAL; 899 goto free_mem; 900 } 901 902 dev->caps.hca_core_clock = hca_param->hca_core_clock; 903 904 dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp; 905 err = mlx4_dev_cap(dev, dev_cap); 906 if (err) { 907 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 908 goto free_mem; 909 } 910 911 err = mlx4_QUERY_FW(dev); 912 if (err) 913 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); 914 915 page_size = ~dev->caps.page_size_cap + 1; 916 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 917 if (page_size > PAGE_SIZE) { 918 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 919 page_size, PAGE_SIZE); 920 err = -ENODEV; 921 goto free_mem; 922 } 923 924 /* Set uar_page_shift for VF */ 925 dev->uar_page_shift = hca_param->uar_page_sz + 12; 926 927 /* Make sure the master uar page size is valid */ 928 if (dev->uar_page_shift > PAGE_SHIFT) { 929 mlx4_err(dev, 930 "Invalid configuration: uar page size is larger than system page size\n"); 931 err = -ENODEV; 932 goto free_mem; 933 } 934 935 /* Set reserved_uars based on the uar_page_shift */ 936 mlx4_set_num_reserved_uars(dev, dev_cap); 937 938 /* Although uar page size in FW differs from system page size, 939 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core) 940 * still works with assumption that uar page size == system page size 941 */ 942 dev->caps.uar_page_size = PAGE_SIZE; 943 944 err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap); 945 if (err) { 946 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", 947 err); 948 goto free_mem; 949 } 950 951 if ((func_cap->pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 952 PF_CONTEXT_BEHAVIOUR_MASK) { 953 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", 954 func_cap->pf_context_behaviour, 955 PF_CONTEXT_BEHAVIOUR_MASK); 956 err = -EINVAL; 957 goto free_mem; 958 } 959 960 dev->caps.num_ports = func_cap->num_ports; 961 dev->quotas.qp = func_cap->qp_quota; 962 dev->quotas.srq = func_cap->srq_quota; 963 dev->quotas.cq = func_cap->cq_quota; 964 dev->quotas.mpt = func_cap->mpt_quota; 965 dev->quotas.mtt = func_cap->mtt_quota; 966 dev->caps.num_qps = 1 << hca_param->log_num_qps; 967 dev->caps.num_srqs = 1 << hca_param->log_num_srqs; 968 dev->caps.num_cqs = 1 << hca_param->log_num_cqs; 969 dev->caps.num_mpts = 1 << hca_param->log_mpt_sz; 970 dev->caps.num_eqs = func_cap->max_eq; 971 dev->caps.reserved_eqs = func_cap->reserved_eq; 972 dev->caps.reserved_lkey = func_cap->reserved_lkey; 973 dev->caps.num_pds = MLX4_NUM_PDS; 974 dev->caps.num_mgms = 0; 975 dev->caps.num_amgms = 0; 976 977 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 978 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 979 dev->caps.num_ports, MLX4_MAX_PORTS); 980 err = -ENODEV; 981 goto free_mem; 982 } 983 984 mlx4_replace_zero_macs(dev); 985 986 err = mlx4_slave_special_qp_cap(dev); 987 if (err) { 988 mlx4_err(dev, "Set special QP caps failed. aborting\n"); 989 goto free_mem; 990 } 991 992 if (dev->caps.uar_page_size * (dev->caps.num_uars - 993 dev->caps.reserved_uars) > 994 pci_resource_len(dev->persist->pdev, 995 2)) { 996 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 997 dev->caps.uar_page_size * dev->caps.num_uars, 998 (unsigned long long) 999 pci_resource_len(dev->persist->pdev, 2)); 1000 err = -ENOMEM; 1001 goto err_mem; 1002 } 1003 1004 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 1005 dev->caps.eqe_size = 64; 1006 dev->caps.eqe_factor = 1; 1007 } else { 1008 dev->caps.eqe_size = 32; 1009 dev->caps.eqe_factor = 0; 1010 } 1011 1012 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 1013 dev->caps.cqe_size = 64; 1014 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1015 } else { 1016 dev->caps.cqe_size = 32; 1017 } 1018 1019 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 1020 dev->caps.eqe_size = hca_param->eqe_size; 1021 dev->caps.eqe_factor = 0; 1022 } 1023 1024 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 1025 dev->caps.cqe_size = hca_param->cqe_size; 1026 /* User still need to know when CQE > 32B */ 1027 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1028 } 1029 1030 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1031 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 1032 1033 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN; 1034 mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n"); 1035 1036 slave_adjust_steering_mode(dev, dev_cap, hca_param); 1037 mlx4_dbg(dev, "RSS support for IP fragments is %s\n", 1038 hca_param->rss_ip_frags ? "on" : "off"); 1039 1040 if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 1041 dev->caps.bf_reg_size) 1042 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 1043 1044 if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 1045 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 1046 1047 err_mem: 1048 if (err) 1049 mlx4_slave_destroy_special_qp_cap(dev); 1050 free_mem: 1051 kfree(hca_param); 1052 kfree(func_cap); 1053 kfree(dev_cap); 1054 return err; 1055 } 1056 1057 static void mlx4_request_modules(struct mlx4_dev *dev) 1058 { 1059 int port; 1060 int has_ib_port = false; 1061 int has_eth_port = false; 1062 #define EN_DRV_NAME "mlx4_en" 1063 #define IB_DRV_NAME "mlx4_ib" 1064 1065 for (port = 1; port <= dev->caps.num_ports; port++) { 1066 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 1067 has_ib_port = true; 1068 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 1069 has_eth_port = true; 1070 } 1071 1072 if (has_eth_port) 1073 request_module_nowait(EN_DRV_NAME); 1074 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 1075 request_module_nowait(IB_DRV_NAME); 1076 } 1077 1078 /* 1079 * Change the port configuration of the device. 1080 * Every user of this function must hold the port mutex. 1081 */ 1082 int mlx4_change_port_types(struct mlx4_dev *dev, 1083 enum mlx4_port_type *port_types) 1084 { 1085 int err = 0; 1086 int change = 0; 1087 int port; 1088 1089 for (port = 0; port < dev->caps.num_ports; port++) { 1090 /* Change the port type only if the new type is different 1091 * from the current, and not set to Auto */ 1092 if (port_types[port] != dev->caps.port_type[port + 1]) 1093 change = 1; 1094 } 1095 if (change) { 1096 mlx4_unregister_device(dev); 1097 for (port = 1; port <= dev->caps.num_ports; port++) { 1098 mlx4_CLOSE_PORT(dev, port); 1099 dev->caps.port_type[port] = port_types[port - 1]; 1100 err = mlx4_SET_PORT(dev, port, -1); 1101 if (err) { 1102 mlx4_err(dev, "Failed to set port %d, aborting\n", 1103 port); 1104 goto out; 1105 } 1106 } 1107 mlx4_set_port_mask(dev); 1108 err = mlx4_register_device(dev); 1109 if (err) { 1110 mlx4_err(dev, "Failed to register device\n"); 1111 goto out; 1112 } 1113 mlx4_request_modules(dev); 1114 } 1115 1116 out: 1117 return err; 1118 } 1119 1120 static ssize_t show_port_type(struct device *dev, 1121 struct device_attribute *attr, 1122 char *buf) 1123 { 1124 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1125 port_attr); 1126 struct mlx4_dev *mdev = info->dev; 1127 char type[8]; 1128 1129 sprintf(type, "%s", 1130 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 1131 "ib" : "eth"); 1132 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 1133 sprintf(buf, "auto (%s)\n", type); 1134 else 1135 sprintf(buf, "%s\n", type); 1136 1137 return strlen(buf); 1138 } 1139 1140 static int __set_port_type(struct mlx4_port_info *info, 1141 enum mlx4_port_type port_type) 1142 { 1143 struct mlx4_dev *mdev = info->dev; 1144 struct mlx4_priv *priv = mlx4_priv(mdev); 1145 enum mlx4_port_type types[MLX4_MAX_PORTS]; 1146 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 1147 int i; 1148 int err = 0; 1149 1150 if ((port_type & mdev->caps.supported_type[info->port]) != port_type) { 1151 mlx4_err(mdev, 1152 "Requested port type for port %d is not supported on this HCA\n", 1153 info->port); 1154 err = -EINVAL; 1155 goto err_sup; 1156 } 1157 1158 mlx4_stop_sense(mdev); 1159 mutex_lock(&priv->port_mutex); 1160 info->tmp_type = port_type; 1161 1162 /* Possible type is always the one that was delivered */ 1163 mdev->caps.possible_type[info->port] = info->tmp_type; 1164 1165 for (i = 0; i < mdev->caps.num_ports; i++) { 1166 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 1167 mdev->caps.possible_type[i+1]; 1168 if (types[i] == MLX4_PORT_TYPE_AUTO) 1169 types[i] = mdev->caps.port_type[i+1]; 1170 } 1171 1172 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 1173 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 1174 for (i = 1; i <= mdev->caps.num_ports; i++) { 1175 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 1176 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 1177 err = -EINVAL; 1178 } 1179 } 1180 } 1181 if (err) { 1182 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); 1183 goto out; 1184 } 1185 1186 mlx4_do_sense_ports(mdev, new_types, types); 1187 1188 err = mlx4_check_port_params(mdev, new_types); 1189 if (err) 1190 goto out; 1191 1192 /* We are about to apply the changes after the configuration 1193 * was verified, no need to remember the temporary types 1194 * any more */ 1195 for (i = 0; i < mdev->caps.num_ports; i++) 1196 priv->port[i + 1].tmp_type = 0; 1197 1198 err = mlx4_change_port_types(mdev, new_types); 1199 1200 out: 1201 mlx4_start_sense(mdev); 1202 mutex_unlock(&priv->port_mutex); 1203 err_sup: 1204 return err; 1205 } 1206 1207 static ssize_t set_port_type(struct device *dev, 1208 struct device_attribute *attr, 1209 const char *buf, size_t count) 1210 { 1211 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1212 port_attr); 1213 struct mlx4_dev *mdev = info->dev; 1214 enum mlx4_port_type port_type; 1215 static DEFINE_MUTEX(set_port_type_mutex); 1216 int err; 1217 1218 mutex_lock(&set_port_type_mutex); 1219 1220 if (!strcmp(buf, "ib\n")) { 1221 port_type = MLX4_PORT_TYPE_IB; 1222 } else if (!strcmp(buf, "eth\n")) { 1223 port_type = MLX4_PORT_TYPE_ETH; 1224 } else if (!strcmp(buf, "auto\n")) { 1225 port_type = MLX4_PORT_TYPE_AUTO; 1226 } else { 1227 mlx4_err(mdev, "%s is not supported port type\n", buf); 1228 err = -EINVAL; 1229 goto err_out; 1230 } 1231 1232 err = __set_port_type(info, port_type); 1233 1234 err_out: 1235 mutex_unlock(&set_port_type_mutex); 1236 1237 return err ? err : count; 1238 } 1239 1240 enum ibta_mtu { 1241 IB_MTU_256 = 1, 1242 IB_MTU_512 = 2, 1243 IB_MTU_1024 = 3, 1244 IB_MTU_2048 = 4, 1245 IB_MTU_4096 = 5 1246 }; 1247 1248 static inline int int_to_ibta_mtu(int mtu) 1249 { 1250 switch (mtu) { 1251 case 256: return IB_MTU_256; 1252 case 512: return IB_MTU_512; 1253 case 1024: return IB_MTU_1024; 1254 case 2048: return IB_MTU_2048; 1255 case 4096: return IB_MTU_4096; 1256 default: return -1; 1257 } 1258 } 1259 1260 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 1261 { 1262 switch (mtu) { 1263 case IB_MTU_256: return 256; 1264 case IB_MTU_512: return 512; 1265 case IB_MTU_1024: return 1024; 1266 case IB_MTU_2048: return 2048; 1267 case IB_MTU_4096: return 4096; 1268 default: return -1; 1269 } 1270 } 1271 1272 static ssize_t show_port_ib_mtu(struct device *dev, 1273 struct device_attribute *attr, 1274 char *buf) 1275 { 1276 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1277 port_mtu_attr); 1278 struct mlx4_dev *mdev = info->dev; 1279 1280 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 1281 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1282 1283 sprintf(buf, "%d\n", 1284 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 1285 return strlen(buf); 1286 } 1287 1288 static ssize_t set_port_ib_mtu(struct device *dev, 1289 struct device_attribute *attr, 1290 const char *buf, size_t count) 1291 { 1292 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1293 port_mtu_attr); 1294 struct mlx4_dev *mdev = info->dev; 1295 struct mlx4_priv *priv = mlx4_priv(mdev); 1296 int err, port, mtu, ibta_mtu = -1; 1297 1298 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 1299 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1300 return -EINVAL; 1301 } 1302 1303 err = kstrtoint(buf, 0, &mtu); 1304 if (!err) 1305 ibta_mtu = int_to_ibta_mtu(mtu); 1306 1307 if (err || ibta_mtu < 0) { 1308 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 1309 return -EINVAL; 1310 } 1311 1312 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 1313 1314 mlx4_stop_sense(mdev); 1315 mutex_lock(&priv->port_mutex); 1316 mlx4_unregister_device(mdev); 1317 for (port = 1; port <= mdev->caps.num_ports; port++) { 1318 mlx4_CLOSE_PORT(mdev, port); 1319 err = mlx4_SET_PORT(mdev, port, -1); 1320 if (err) { 1321 mlx4_err(mdev, "Failed to set port %d, aborting\n", 1322 port); 1323 goto err_set_port; 1324 } 1325 } 1326 err = mlx4_register_device(mdev); 1327 err_set_port: 1328 mutex_unlock(&priv->port_mutex); 1329 mlx4_start_sense(mdev); 1330 return err ? err : count; 1331 } 1332 1333 /* bond for multi-function device */ 1334 #define MAX_MF_BOND_ALLOWED_SLAVES 63 1335 static int mlx4_mf_bond(struct mlx4_dev *dev) 1336 { 1337 int err = 0; 1338 int nvfs; 1339 struct mlx4_slaves_pport slaves_port1; 1340 struct mlx4_slaves_pport slaves_port2; 1341 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); 1342 1343 slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1); 1344 slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2); 1345 bitmap_and(slaves_port_1_2, 1346 slaves_port1.slaves, slaves_port2.slaves, 1347 dev->persist->num_vfs + 1); 1348 1349 /* only single port vfs are allowed */ 1350 if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) { 1351 mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n"); 1352 return -EINVAL; 1353 } 1354 1355 /* number of virtual functions is number of total functions minus one 1356 * physical function for each port. 1357 */ 1358 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + 1359 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2; 1360 1361 /* limit on maximum allowed VFs */ 1362 if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) { 1363 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n", 1364 nvfs, MAX_MF_BOND_ALLOWED_SLAVES); 1365 return -EINVAL; 1366 } 1367 1368 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { 1369 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); 1370 return -EINVAL; 1371 } 1372 1373 err = mlx4_bond_mac_table(dev); 1374 if (err) 1375 return err; 1376 err = mlx4_bond_vlan_table(dev); 1377 if (err) 1378 goto err1; 1379 err = mlx4_bond_fs_rules(dev); 1380 if (err) 1381 goto err2; 1382 1383 return 0; 1384 err2: 1385 (void)mlx4_unbond_vlan_table(dev); 1386 err1: 1387 (void)mlx4_unbond_mac_table(dev); 1388 return err; 1389 } 1390 1391 static int mlx4_mf_unbond(struct mlx4_dev *dev) 1392 { 1393 int ret, ret1; 1394 1395 ret = mlx4_unbond_fs_rules(dev); 1396 if (ret) 1397 mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); 1398 ret1 = mlx4_unbond_mac_table(dev); 1399 if (ret1) { 1400 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); 1401 ret = ret1; 1402 } 1403 ret1 = mlx4_unbond_vlan_table(dev); 1404 if (ret1) { 1405 mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1); 1406 ret = ret1; 1407 } 1408 return ret; 1409 } 1410 1411 int mlx4_bond(struct mlx4_dev *dev) 1412 { 1413 int ret = 0; 1414 struct mlx4_priv *priv = mlx4_priv(dev); 1415 1416 mutex_lock(&priv->bond_mutex); 1417 1418 if (!mlx4_is_bonded(dev)) { 1419 ret = mlx4_do_bond(dev, true); 1420 if (ret) 1421 mlx4_err(dev, "Failed to bond device: %d\n", ret); 1422 if (!ret && mlx4_is_master(dev)) { 1423 ret = mlx4_mf_bond(dev); 1424 if (ret) { 1425 mlx4_err(dev, "bond for multifunction failed\n"); 1426 mlx4_do_bond(dev, false); 1427 } 1428 } 1429 } 1430 1431 mutex_unlock(&priv->bond_mutex); 1432 if (!ret) 1433 mlx4_dbg(dev, "Device is bonded\n"); 1434 1435 return ret; 1436 } 1437 EXPORT_SYMBOL_GPL(mlx4_bond); 1438 1439 int mlx4_unbond(struct mlx4_dev *dev) 1440 { 1441 int ret = 0; 1442 struct mlx4_priv *priv = mlx4_priv(dev); 1443 1444 mutex_lock(&priv->bond_mutex); 1445 1446 if (mlx4_is_bonded(dev)) { 1447 int ret2 = 0; 1448 1449 ret = mlx4_do_bond(dev, false); 1450 if (ret) 1451 mlx4_err(dev, "Failed to unbond device: %d\n", ret); 1452 if (mlx4_is_master(dev)) 1453 ret2 = mlx4_mf_unbond(dev); 1454 if (ret2) { 1455 mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2); 1456 ret = ret2; 1457 } 1458 } 1459 1460 mutex_unlock(&priv->bond_mutex); 1461 if (!ret) 1462 mlx4_dbg(dev, "Device is unbonded\n"); 1463 1464 return ret; 1465 } 1466 EXPORT_SYMBOL_GPL(mlx4_unbond); 1467 1468 1469 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) 1470 { 1471 u8 port1 = v2p->port1; 1472 u8 port2 = v2p->port2; 1473 struct mlx4_priv *priv = mlx4_priv(dev); 1474 int err; 1475 1476 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) 1477 return -EOPNOTSUPP; 1478 1479 mutex_lock(&priv->bond_mutex); 1480 1481 /* zero means keep current mapping for this port */ 1482 if (port1 == 0) 1483 port1 = priv->v2p.port1; 1484 if (port2 == 0) 1485 port2 = priv->v2p.port2; 1486 1487 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || 1488 (port2 < 1) || (port2 > MLX4_MAX_PORTS) || 1489 (port1 == 2 && port2 == 1)) { 1490 /* besides boundary checks cross mapping makes 1491 * no sense and therefore not allowed */ 1492 err = -EINVAL; 1493 } else if ((port1 == priv->v2p.port1) && 1494 (port2 == priv->v2p.port2)) { 1495 err = 0; 1496 } else { 1497 err = mlx4_virt2phy_port_map(dev, port1, port2); 1498 if (!err) { 1499 mlx4_dbg(dev, "port map changed: [%d][%d]\n", 1500 port1, port2); 1501 priv->v2p.port1 = port1; 1502 priv->v2p.port2 = port2; 1503 } else { 1504 mlx4_err(dev, "Failed to change port mape: %d\n", err); 1505 } 1506 } 1507 1508 mutex_unlock(&priv->bond_mutex); 1509 return err; 1510 } 1511 EXPORT_SYMBOL_GPL(mlx4_port_map_set); 1512 1513 static int mlx4_load_fw(struct mlx4_dev *dev) 1514 { 1515 struct mlx4_priv *priv = mlx4_priv(dev); 1516 int err; 1517 1518 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 1519 GFP_HIGHUSER | __GFP_NOWARN, 0); 1520 if (!priv->fw.fw_icm) { 1521 mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); 1522 return -ENOMEM; 1523 } 1524 1525 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 1526 if (err) { 1527 mlx4_err(dev, "MAP_FA command failed, aborting\n"); 1528 goto err_free; 1529 } 1530 1531 err = mlx4_RUN_FW(dev); 1532 if (err) { 1533 mlx4_err(dev, "RUN_FW command failed, aborting\n"); 1534 goto err_unmap_fa; 1535 } 1536 1537 return 0; 1538 1539 err_unmap_fa: 1540 mlx4_UNMAP_FA(dev); 1541 1542 err_free: 1543 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1544 return err; 1545 } 1546 1547 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 1548 int cmpt_entry_sz) 1549 { 1550 struct mlx4_priv *priv = mlx4_priv(dev); 1551 int err; 1552 int num_eqs; 1553 1554 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 1555 cmpt_base + 1556 ((u64) (MLX4_CMPT_TYPE_QP * 1557 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1558 cmpt_entry_sz, dev->caps.num_qps, 1559 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1560 0, 0); 1561 if (err) 1562 goto err; 1563 1564 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 1565 cmpt_base + 1566 ((u64) (MLX4_CMPT_TYPE_SRQ * 1567 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1568 cmpt_entry_sz, dev->caps.num_srqs, 1569 dev->caps.reserved_srqs, 0, 0); 1570 if (err) 1571 goto err_qp; 1572 1573 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 1574 cmpt_base + 1575 ((u64) (MLX4_CMPT_TYPE_CQ * 1576 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1577 cmpt_entry_sz, dev->caps.num_cqs, 1578 dev->caps.reserved_cqs, 0, 0); 1579 if (err) 1580 goto err_srq; 1581 1582 num_eqs = dev->phys_caps.num_phys_eqs; 1583 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 1584 cmpt_base + 1585 ((u64) (MLX4_CMPT_TYPE_EQ * 1586 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1587 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 1588 if (err) 1589 goto err_cq; 1590 1591 return 0; 1592 1593 err_cq: 1594 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1595 1596 err_srq: 1597 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1598 1599 err_qp: 1600 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1601 1602 err: 1603 return err; 1604 } 1605 1606 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 1607 struct mlx4_init_hca_param *init_hca, u64 icm_size) 1608 { 1609 struct mlx4_priv *priv = mlx4_priv(dev); 1610 u64 aux_pages; 1611 int num_eqs; 1612 int err; 1613 1614 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1615 if (err) { 1616 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); 1617 return err; 1618 } 1619 1620 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", 1621 (unsigned long long) icm_size >> 10, 1622 (unsigned long long) aux_pages << 2); 1623 1624 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1625 GFP_HIGHUSER | __GFP_NOWARN, 0); 1626 if (!priv->fw.aux_icm) { 1627 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); 1628 return -ENOMEM; 1629 } 1630 1631 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1632 if (err) { 1633 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); 1634 goto err_free_aux; 1635 } 1636 1637 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1638 if (err) { 1639 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); 1640 goto err_unmap_aux; 1641 } 1642 1643 1644 num_eqs = dev->phys_caps.num_phys_eqs; 1645 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 1646 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1647 num_eqs, num_eqs, 0, 0); 1648 if (err) { 1649 mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); 1650 goto err_unmap_cmpt; 1651 } 1652 1653 /* 1654 * Reserved MTT entries must be aligned up to a cacheline 1655 * boundary, since the FW will write to them, while the driver 1656 * writes to all other MTT entries. (The variable 1657 * dev->caps.mtt_entry_sz below is really the MTT segment 1658 * size, not the raw entry size) 1659 */ 1660 dev->caps.reserved_mtts = 1661 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 1662 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 1663 1664 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 1665 init_hca->mtt_base, 1666 dev->caps.mtt_entry_sz, 1667 dev->caps.num_mtts, 1668 dev->caps.reserved_mtts, 1, 0); 1669 if (err) { 1670 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); 1671 goto err_unmap_eq; 1672 } 1673 1674 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 1675 init_hca->dmpt_base, 1676 dev_cap->dmpt_entry_sz, 1677 dev->caps.num_mpts, 1678 dev->caps.reserved_mrws, 1, 1); 1679 if (err) { 1680 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); 1681 goto err_unmap_mtt; 1682 } 1683 1684 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1685 init_hca->qpc_base, 1686 dev_cap->qpc_entry_sz, 1687 dev->caps.num_qps, 1688 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1689 0, 0); 1690 if (err) { 1691 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); 1692 goto err_unmap_dmpt; 1693 } 1694 1695 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1696 init_hca->auxc_base, 1697 dev_cap->aux_entry_sz, 1698 dev->caps.num_qps, 1699 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1700 0, 0); 1701 if (err) { 1702 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); 1703 goto err_unmap_qp; 1704 } 1705 1706 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1707 init_hca->altc_base, 1708 dev_cap->altc_entry_sz, 1709 dev->caps.num_qps, 1710 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1711 0, 0); 1712 if (err) { 1713 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); 1714 goto err_unmap_auxc; 1715 } 1716 1717 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1718 init_hca->rdmarc_base, 1719 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1720 dev->caps.num_qps, 1721 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1722 0, 0); 1723 if (err) { 1724 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1725 goto err_unmap_altc; 1726 } 1727 1728 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1729 init_hca->cqc_base, 1730 dev_cap->cqc_entry_sz, 1731 dev->caps.num_cqs, 1732 dev->caps.reserved_cqs, 0, 0); 1733 if (err) { 1734 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); 1735 goto err_unmap_rdmarc; 1736 } 1737 1738 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1739 init_hca->srqc_base, 1740 dev_cap->srq_entry_sz, 1741 dev->caps.num_srqs, 1742 dev->caps.reserved_srqs, 0, 0); 1743 if (err) { 1744 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); 1745 goto err_unmap_cq; 1746 } 1747 1748 /* 1749 * For flow steering device managed mode it is required to use 1750 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1751 * required, but for simplicity just map the whole multicast 1752 * group table now. The table isn't very big and it's a lot 1753 * easier than trying to track ref counts. 1754 */ 1755 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1756 init_hca->mc_base, 1757 mlx4_get_mgm_entry_size(dev), 1758 dev->caps.num_mgms + dev->caps.num_amgms, 1759 dev->caps.num_mgms + dev->caps.num_amgms, 1760 0, 0); 1761 if (err) { 1762 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); 1763 goto err_unmap_srq; 1764 } 1765 1766 return 0; 1767 1768 err_unmap_srq: 1769 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1770 1771 err_unmap_cq: 1772 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1773 1774 err_unmap_rdmarc: 1775 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1776 1777 err_unmap_altc: 1778 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1779 1780 err_unmap_auxc: 1781 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1782 1783 err_unmap_qp: 1784 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1785 1786 err_unmap_dmpt: 1787 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1788 1789 err_unmap_mtt: 1790 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1791 1792 err_unmap_eq: 1793 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1794 1795 err_unmap_cmpt: 1796 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1797 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1798 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1799 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1800 1801 err_unmap_aux: 1802 mlx4_UNMAP_ICM_AUX(dev); 1803 1804 err_free_aux: 1805 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1806 1807 return err; 1808 } 1809 1810 static void mlx4_free_icms(struct mlx4_dev *dev) 1811 { 1812 struct mlx4_priv *priv = mlx4_priv(dev); 1813 1814 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1815 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1816 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1817 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1818 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1819 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1820 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1821 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1822 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1823 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1824 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1825 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1826 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1827 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1828 1829 mlx4_UNMAP_ICM_AUX(dev); 1830 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1831 } 1832 1833 static void mlx4_slave_exit(struct mlx4_dev *dev) 1834 { 1835 struct mlx4_priv *priv = mlx4_priv(dev); 1836 1837 mutex_lock(&priv->cmd.slave_cmd_mutex); 1838 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 1839 MLX4_COMM_TIME)) 1840 mlx4_warn(dev, "Failed to close slave function\n"); 1841 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1842 } 1843 1844 static int map_bf_area(struct mlx4_dev *dev) 1845 { 1846 struct mlx4_priv *priv = mlx4_priv(dev); 1847 resource_size_t bf_start; 1848 resource_size_t bf_len; 1849 int err = 0; 1850 1851 if (!dev->caps.bf_reg_size) 1852 return -ENXIO; 1853 1854 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1855 (dev->caps.num_uars << PAGE_SHIFT); 1856 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1857 (dev->caps.num_uars << PAGE_SHIFT); 1858 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1859 if (!priv->bf_mapping) 1860 err = -ENOMEM; 1861 1862 return err; 1863 } 1864 1865 static void unmap_bf_area(struct mlx4_dev *dev) 1866 { 1867 if (mlx4_priv(dev)->bf_mapping) 1868 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1869 } 1870 1871 u64 mlx4_read_clock(struct mlx4_dev *dev) 1872 { 1873 u32 clockhi, clocklo, clockhi1; 1874 u64 cycles; 1875 int i; 1876 struct mlx4_priv *priv = mlx4_priv(dev); 1877 1878 for (i = 0; i < 10; i++) { 1879 clockhi = swab32(readl(priv->clock_mapping)); 1880 clocklo = swab32(readl(priv->clock_mapping + 4)); 1881 clockhi1 = swab32(readl(priv->clock_mapping)); 1882 if (clockhi == clockhi1) 1883 break; 1884 } 1885 1886 cycles = (u64) clockhi << 32 | (u64) clocklo; 1887 1888 return cycles; 1889 } 1890 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1891 1892 1893 static int map_internal_clock(struct mlx4_dev *dev) 1894 { 1895 struct mlx4_priv *priv = mlx4_priv(dev); 1896 1897 priv->clock_mapping = 1898 ioremap(pci_resource_start(dev->persist->pdev, 1899 priv->fw.clock_bar) + 1900 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1901 1902 if (!priv->clock_mapping) 1903 return -ENOMEM; 1904 1905 return 0; 1906 } 1907 1908 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1909 struct mlx4_clock_params *params) 1910 { 1911 struct mlx4_priv *priv = mlx4_priv(dev); 1912 1913 if (mlx4_is_slave(dev)) 1914 return -EOPNOTSUPP; 1915 1916 if (!params) 1917 return -EINVAL; 1918 1919 params->bar = priv->fw.clock_bar; 1920 params->offset = priv->fw.clock_offset; 1921 params->size = MLX4_CLOCK_SIZE; 1922 1923 return 0; 1924 } 1925 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); 1926 1927 static void unmap_internal_clock(struct mlx4_dev *dev) 1928 { 1929 struct mlx4_priv *priv = mlx4_priv(dev); 1930 1931 if (priv->clock_mapping) 1932 iounmap(priv->clock_mapping); 1933 } 1934 1935 static void mlx4_close_hca(struct mlx4_dev *dev) 1936 { 1937 unmap_internal_clock(dev); 1938 unmap_bf_area(dev); 1939 if (mlx4_is_slave(dev)) 1940 mlx4_slave_exit(dev); 1941 else { 1942 mlx4_CLOSE_HCA(dev, 0); 1943 mlx4_free_icms(dev); 1944 } 1945 } 1946 1947 static void mlx4_close_fw(struct mlx4_dev *dev) 1948 { 1949 if (!mlx4_is_slave(dev)) { 1950 mlx4_UNMAP_FA(dev); 1951 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1952 } 1953 } 1954 1955 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1956 { 1957 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1958 1959 u32 comm_flags; 1960 u32 offline_bit; 1961 unsigned long end; 1962 struct mlx4_priv *priv = mlx4_priv(dev); 1963 1964 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1965 while (time_before(jiffies, end)) { 1966 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1967 MLX4_COMM_CHAN_FLAGS)); 1968 offline_bit = (comm_flags & 1969 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1970 if (!offline_bit) 1971 return 0; 1972 1973 /* If device removal has been requested, 1974 * do not continue retrying. 1975 */ 1976 if (dev->persist->interface_state & 1977 MLX4_INTERFACE_STATE_NOWAIT) 1978 break; 1979 1980 /* There are cases as part of AER/Reset flow that PF needs 1981 * around 100 msec to load. We therefore sleep for 100 msec 1982 * to allow other tasks to make use of that CPU during this 1983 * time interval. 1984 */ 1985 msleep(100); 1986 } 1987 mlx4_err(dev, "Communication channel is offline.\n"); 1988 return -EIO; 1989 } 1990 1991 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1992 { 1993 #define COMM_CHAN_RST_OFFSET 0x1e 1994 1995 struct mlx4_priv *priv = mlx4_priv(dev); 1996 u32 comm_rst; 1997 u32 comm_caps; 1998 1999 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 2000 MLX4_COMM_CHAN_CAPS)); 2001 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 2002 2003 if (comm_rst) 2004 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 2005 } 2006 2007 static int mlx4_init_slave(struct mlx4_dev *dev) 2008 { 2009 struct mlx4_priv *priv = mlx4_priv(dev); 2010 u64 dma = (u64) priv->mfunc.vhcr_dma; 2011 int ret_from_reset = 0; 2012 u32 slave_read; 2013 u32 cmd_channel_ver; 2014 2015 if (atomic_read(&pf_loading)) { 2016 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 2017 return -EPROBE_DEFER; 2018 } 2019 2020 mutex_lock(&priv->cmd.slave_cmd_mutex); 2021 priv->cmd.max_cmds = 1; 2022 if (mlx4_comm_check_offline(dev)) { 2023 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 2024 goto err_offline; 2025 } 2026 2027 mlx4_reset_vf_support(dev); 2028 mlx4_warn(dev, "Sending reset\n"); 2029 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 2030 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 2031 /* if we are in the middle of flr the slave will try 2032 * NUM_OF_RESET_RETRIES times before leaving.*/ 2033 if (ret_from_reset) { 2034 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 2035 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 2036 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2037 return -EPROBE_DEFER; 2038 } else 2039 goto err; 2040 } 2041 2042 /* check the driver version - the slave I/F revision 2043 * must match the master's */ 2044 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 2045 cmd_channel_ver = mlx4_comm_get_version(); 2046 2047 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 2048 MLX4_COMM_GET_IF_REV(slave_read)) { 2049 mlx4_err(dev, "slave driver version is not supported by the master\n"); 2050 goto err; 2051 } 2052 2053 mlx4_warn(dev, "Sending vhcr0\n"); 2054 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 2055 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2056 goto err; 2057 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 2058 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2059 goto err; 2060 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 2061 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2062 goto err; 2063 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 2064 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2065 goto err; 2066 2067 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2068 return 0; 2069 2070 err: 2071 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 2072 err_offline: 2073 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2074 return -EIO; 2075 } 2076 2077 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 2078 { 2079 int i; 2080 2081 for (i = 1; i <= dev->caps.num_ports; i++) { 2082 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 2083 dev->caps.gid_table_len[i] = 2084 mlx4_get_slave_num_gids(dev, 0, i); 2085 else 2086 dev->caps.gid_table_len[i] = 1; 2087 dev->caps.pkey_table_len[i] = 2088 dev->phys_caps.pkey_phys_table_len[i] - 1; 2089 } 2090 } 2091 2092 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 2093 { 2094 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 2095 2096 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 2097 i++) { 2098 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 2099 break; 2100 } 2101 2102 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 2103 } 2104 2105 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 2106 { 2107 switch (dmfs_high_steer_mode) { 2108 case MLX4_STEERING_DMFS_A0_DEFAULT: 2109 return "default performance"; 2110 2111 case MLX4_STEERING_DMFS_A0_DYNAMIC: 2112 return "dynamic hybrid mode"; 2113 2114 case MLX4_STEERING_DMFS_A0_STATIC: 2115 return "performance optimized for limited rule configuration (static)"; 2116 2117 case MLX4_STEERING_DMFS_A0_DISABLE: 2118 return "disabled performance optimized steering"; 2119 2120 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 2121 return "performance optimized steering not supported"; 2122 2123 default: 2124 return "Unrecognized mode"; 2125 } 2126 } 2127 2128 #define MLX4_DMFS_A0_STEERING (1UL << 2) 2129 2130 static void choose_steering_mode(struct mlx4_dev *dev, 2131 struct mlx4_dev_cap *dev_cap) 2132 { 2133 if (mlx4_log_num_mgm_entry_size <= 0) { 2134 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 2135 if (dev->caps.dmfs_high_steer_mode == 2136 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2137 mlx4_err(dev, "DMFS high rate mode not supported\n"); 2138 else 2139 dev->caps.dmfs_high_steer_mode = 2140 MLX4_STEERING_DMFS_A0_STATIC; 2141 } 2142 } 2143 2144 if (mlx4_log_num_mgm_entry_size <= 0 && 2145 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 2146 (!mlx4_is_mfunc(dev) || 2147 (dev_cap->fs_max_num_qp_per_entry >= 2148 (dev->persist->num_vfs + 1))) && 2149 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 2150 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 2151 dev->oper_log_mgm_entry_size = 2152 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 2153 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2154 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 2155 dev->caps.fs_log_max_ucast_qp_range_size = 2156 dev_cap->fs_log_max_ucast_qp_range_size; 2157 } else { 2158 if (dev->caps.dmfs_high_steer_mode != 2159 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2160 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 2161 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 2162 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2163 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 2164 else { 2165 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 2166 2167 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 2168 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2169 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 2170 } 2171 dev->oper_log_mgm_entry_size = 2172 mlx4_log_num_mgm_entry_size > 0 ? 2173 mlx4_log_num_mgm_entry_size : 2174 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 2175 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 2176 } 2177 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 2178 mlx4_steering_mode_str(dev->caps.steering_mode), 2179 dev->oper_log_mgm_entry_size, 2180 mlx4_log_num_mgm_entry_size); 2181 } 2182 2183 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 2184 struct mlx4_dev_cap *dev_cap) 2185 { 2186 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 2187 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 2188 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 2189 else 2190 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 2191 2192 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 2193 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 2194 } 2195 2196 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 2197 { 2198 int i; 2199 struct mlx4_port_cap port_cap; 2200 2201 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2202 return -EINVAL; 2203 2204 for (i = 1; i <= dev->caps.num_ports; i++) { 2205 if (mlx4_dev_port(dev, i, &port_cap)) { 2206 mlx4_err(dev, 2207 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 2208 } else if ((dev->caps.dmfs_high_steer_mode != 2209 MLX4_STEERING_DMFS_A0_DEFAULT) && 2210 (port_cap.dmfs_optimized_state == 2211 !!(dev->caps.dmfs_high_steer_mode == 2212 MLX4_STEERING_DMFS_A0_DISABLE))) { 2213 mlx4_err(dev, 2214 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 2215 dmfs_high_rate_steering_mode_str( 2216 dev->caps.dmfs_high_steer_mode), 2217 (port_cap.dmfs_optimized_state ? 2218 "enabled" : "disabled")); 2219 } 2220 } 2221 2222 return 0; 2223 } 2224 2225 static int mlx4_init_fw(struct mlx4_dev *dev) 2226 { 2227 struct mlx4_mod_stat_cfg mlx4_cfg; 2228 int err = 0; 2229 2230 if (!mlx4_is_slave(dev)) { 2231 err = mlx4_QUERY_FW(dev); 2232 if (err) { 2233 if (err == -EACCES) 2234 mlx4_info(dev, "non-primary physical function, skipping\n"); 2235 else 2236 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 2237 return err; 2238 } 2239 2240 err = mlx4_load_fw(dev); 2241 if (err) { 2242 mlx4_err(dev, "Failed to start FW, aborting\n"); 2243 return err; 2244 } 2245 2246 mlx4_cfg.log_pg_sz_m = 1; 2247 mlx4_cfg.log_pg_sz = 0; 2248 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 2249 if (err) 2250 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 2251 } 2252 2253 return err; 2254 } 2255 2256 static int mlx4_init_hca(struct mlx4_dev *dev) 2257 { 2258 struct mlx4_priv *priv = mlx4_priv(dev); 2259 struct mlx4_adapter adapter; 2260 struct mlx4_dev_cap dev_cap; 2261 struct mlx4_profile profile; 2262 struct mlx4_init_hca_param init_hca; 2263 u64 icm_size; 2264 struct mlx4_config_dev_params params; 2265 int err; 2266 2267 if (!mlx4_is_slave(dev)) { 2268 err = mlx4_dev_cap(dev, &dev_cap); 2269 if (err) { 2270 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 2271 return err; 2272 } 2273 2274 choose_steering_mode(dev, &dev_cap); 2275 choose_tunnel_offload_mode(dev, &dev_cap); 2276 2277 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 2278 mlx4_is_master(dev)) 2279 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 2280 2281 err = mlx4_get_phys_port_id(dev); 2282 if (err) 2283 mlx4_err(dev, "Fail to get physical port id\n"); 2284 2285 if (mlx4_is_master(dev)) 2286 mlx4_parav_master_pf_caps(dev); 2287 2288 if (mlx4_low_memory_profile()) { 2289 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n"); 2290 profile = low_mem_profile; 2291 } else { 2292 profile = default_profile; 2293 } 2294 if (dev->caps.steering_mode == 2295 MLX4_STEERING_MODE_DEVICE_MANAGED) 2296 profile.num_mcg = MLX4_FS_NUM_MCG; 2297 2298 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2299 &init_hca); 2300 if ((long long) icm_size < 0) { 2301 err = icm_size; 2302 return err; 2303 } 2304 2305 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2306 2307 if (enable_4k_uar || !dev->persist->num_vfs) { 2308 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + 2309 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; 2310 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; 2311 } else { 2312 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2313 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2314 } 2315 2316 init_hca.mw_enabled = 0; 2317 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2318 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2319 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2320 2321 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2322 if (err) 2323 return err; 2324 2325 err = mlx4_INIT_HCA(dev, &init_hca); 2326 if (err) { 2327 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2328 goto err_free_icm; 2329 } 2330 2331 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2332 err = mlx4_query_func(dev, &dev_cap); 2333 if (err < 0) { 2334 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2335 goto err_close; 2336 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2337 dev->caps.num_eqs = dev_cap.max_eqs; 2338 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2339 dev->caps.reserved_uars = dev_cap.reserved_uars; 2340 } 2341 } 2342 2343 /* 2344 * If TS is supported by FW 2345 * read HCA frequency by QUERY_HCA command 2346 */ 2347 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2348 memset(&init_hca, 0, sizeof(init_hca)); 2349 err = mlx4_QUERY_HCA(dev, &init_hca); 2350 if (err) { 2351 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2352 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2353 } else { 2354 dev->caps.hca_core_clock = 2355 init_hca.hca_core_clock; 2356 } 2357 2358 /* In case we got HCA frequency 0 - disable timestamping 2359 * to avoid dividing by zero 2360 */ 2361 if (!dev->caps.hca_core_clock) { 2362 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2363 mlx4_err(dev, 2364 "HCA frequency is 0 - timestamping is not supported\n"); 2365 } else if (map_internal_clock(dev)) { 2366 /* 2367 * Map internal clock, 2368 * in case of failure disable timestamping 2369 */ 2370 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2371 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); 2372 } 2373 } 2374 2375 if (dev->caps.dmfs_high_steer_mode != 2376 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2377 if (mlx4_validate_optimized_steering(dev)) 2378 mlx4_warn(dev, "Optimized steering validation failed\n"); 2379 2380 if (dev->caps.dmfs_high_steer_mode == 2381 MLX4_STEERING_DMFS_A0_DISABLE) { 2382 dev->caps.dmfs_high_rate_qpn_base = 2383 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2384 dev->caps.dmfs_high_rate_qpn_range = 2385 MLX4_A0_STEERING_TABLE_SIZE; 2386 } 2387 2388 mlx4_info(dev, "DMFS high rate steer mode is: %s\n", 2389 dmfs_high_rate_steering_mode_str( 2390 dev->caps.dmfs_high_steer_mode)); 2391 } 2392 } else { 2393 err = mlx4_init_slave(dev); 2394 if (err) { 2395 if (err != -EPROBE_DEFER) 2396 mlx4_err(dev, "Failed to initialize slave\n"); 2397 return err; 2398 } 2399 2400 err = mlx4_slave_cap(dev); 2401 if (err) { 2402 mlx4_err(dev, "Failed to obtain slave caps\n"); 2403 goto err_close; 2404 } 2405 } 2406 2407 if (map_bf_area(dev)) 2408 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2409 2410 /*Only the master set the ports, all the rest got it from it.*/ 2411 if (!mlx4_is_slave(dev)) 2412 mlx4_set_port_mask(dev); 2413 2414 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2415 if (err) { 2416 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2417 goto unmap_bf; 2418 } 2419 2420 /* Query CONFIG_DEV parameters */ 2421 err = mlx4_config_dev_retrieval(dev, ¶ms); 2422 if (err && err != -EOPNOTSUPP) { 2423 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2424 } else if (!err) { 2425 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2426 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2427 } 2428 priv->eq_table.inta_pin = adapter.inta_pin; 2429 memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id)); 2430 2431 return 0; 2432 2433 unmap_bf: 2434 unmap_internal_clock(dev); 2435 unmap_bf_area(dev); 2436 2437 if (mlx4_is_slave(dev)) 2438 mlx4_slave_destroy_special_qp_cap(dev); 2439 2440 err_close: 2441 if (mlx4_is_slave(dev)) 2442 mlx4_slave_exit(dev); 2443 else 2444 mlx4_CLOSE_HCA(dev, 0); 2445 2446 err_free_icm: 2447 if (!mlx4_is_slave(dev)) 2448 mlx4_free_icms(dev); 2449 2450 return err; 2451 } 2452 2453 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2454 { 2455 struct mlx4_priv *priv = mlx4_priv(dev); 2456 int nent_pow2; 2457 2458 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2459 return -ENOENT; 2460 2461 if (!dev->caps.max_counters) 2462 return -ENOSPC; 2463 2464 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); 2465 /* reserve last counter index for sink counter */ 2466 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, 2467 nent_pow2 - 1, 0, 2468 nent_pow2 - dev->caps.max_counters + 1); 2469 } 2470 2471 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2472 { 2473 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2474 return; 2475 2476 if (!dev->caps.max_counters) 2477 return; 2478 2479 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2480 } 2481 2482 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) 2483 { 2484 struct mlx4_priv *priv = mlx4_priv(dev); 2485 int port; 2486 2487 for (port = 0; port < dev->caps.num_ports; port++) 2488 if (priv->def_counter[port] != -1) 2489 mlx4_counter_free(dev, priv->def_counter[port]); 2490 } 2491 2492 static int mlx4_allocate_default_counters(struct mlx4_dev *dev) 2493 { 2494 struct mlx4_priv *priv = mlx4_priv(dev); 2495 int port, err = 0; 2496 u32 idx; 2497 2498 for (port = 0; port < dev->caps.num_ports; port++) 2499 priv->def_counter[port] = -1; 2500 2501 for (port = 0; port < dev->caps.num_ports; port++) { 2502 err = mlx4_counter_alloc(dev, &idx); 2503 2504 if (!err || err == -ENOSPC) { 2505 priv->def_counter[port] = idx; 2506 } else if (err == -ENOENT) { 2507 err = 0; 2508 continue; 2509 } else if (mlx4_is_slave(dev) && err == -EINVAL) { 2510 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); 2511 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", 2512 MLX4_SINK_COUNTER_INDEX(dev)); 2513 err = 0; 2514 } else { 2515 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2516 __func__, port + 1, err); 2517 mlx4_cleanup_default_counters(dev); 2518 return err; 2519 } 2520 2521 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", 2522 __func__, priv->def_counter[port], port + 1); 2523 } 2524 2525 return err; 2526 } 2527 2528 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2529 { 2530 struct mlx4_priv *priv = mlx4_priv(dev); 2531 2532 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2533 return -ENOENT; 2534 2535 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2536 if (*idx == -1) { 2537 *idx = MLX4_SINK_COUNTER_INDEX(dev); 2538 return -ENOSPC; 2539 } 2540 2541 return 0; 2542 } 2543 2544 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2545 { 2546 u64 out_param; 2547 int err; 2548 2549 if (mlx4_is_mfunc(dev)) { 2550 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2551 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2552 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2553 if (!err) 2554 *idx = get_param_l(&out_param); 2555 2556 return err; 2557 } 2558 return __mlx4_counter_alloc(dev, idx); 2559 } 2560 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2561 2562 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, 2563 u8 counter_index) 2564 { 2565 struct mlx4_cmd_mailbox *if_stat_mailbox; 2566 int err; 2567 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; 2568 2569 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); 2570 if (IS_ERR(if_stat_mailbox)) 2571 return PTR_ERR(if_stat_mailbox); 2572 2573 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, 2574 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, 2575 MLX4_CMD_NATIVE); 2576 2577 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); 2578 return err; 2579 } 2580 2581 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2582 { 2583 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2584 return; 2585 2586 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) 2587 return; 2588 2589 __mlx4_clear_if_stat(dev, idx); 2590 2591 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2592 return; 2593 } 2594 2595 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2596 { 2597 u64 in_param = 0; 2598 2599 if (mlx4_is_mfunc(dev)) { 2600 set_param_l(&in_param, idx); 2601 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2602 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2603 MLX4_CMD_WRAPPED); 2604 return; 2605 } 2606 __mlx4_counter_free(dev, idx); 2607 } 2608 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2609 2610 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) 2611 { 2612 struct mlx4_priv *priv = mlx4_priv(dev); 2613 2614 return priv->def_counter[port - 1]; 2615 } 2616 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); 2617 2618 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) 2619 { 2620 struct mlx4_priv *priv = mlx4_priv(dev); 2621 2622 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2623 } 2624 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); 2625 2626 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) 2627 { 2628 struct mlx4_priv *priv = mlx4_priv(dev); 2629 2630 return priv->mfunc.master.vf_admin[entry].vport[port].guid; 2631 } 2632 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); 2633 2634 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) 2635 { 2636 struct mlx4_priv *priv = mlx4_priv(dev); 2637 __be64 guid; 2638 2639 /* hw GUID */ 2640 if (entry == 0) 2641 return; 2642 2643 get_random_bytes((char *)&guid, sizeof(guid)); 2644 guid &= ~(cpu_to_be64(1ULL << 56)); 2645 guid |= cpu_to_be64(1ULL << 57); 2646 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2647 } 2648 2649 static int mlx4_setup_hca(struct mlx4_dev *dev) 2650 { 2651 struct mlx4_priv *priv = mlx4_priv(dev); 2652 int err; 2653 int port; 2654 __be32 ib_port_default_caps; 2655 2656 err = mlx4_init_uar_table(dev); 2657 if (err) { 2658 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2659 return err; 2660 } 2661 2662 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2663 if (err) { 2664 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2665 goto err_uar_table_free; 2666 } 2667 2668 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2669 if (!priv->kar) { 2670 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2671 err = -ENOMEM; 2672 goto err_uar_free; 2673 } 2674 2675 err = mlx4_init_pd_table(dev); 2676 if (err) { 2677 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2678 goto err_kar_unmap; 2679 } 2680 2681 err = mlx4_init_xrcd_table(dev); 2682 if (err) { 2683 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2684 goto err_pd_table_free; 2685 } 2686 2687 err = mlx4_init_mr_table(dev); 2688 if (err) { 2689 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2690 goto err_xrcd_table_free; 2691 } 2692 2693 if (!mlx4_is_slave(dev)) { 2694 err = mlx4_init_mcg_table(dev); 2695 if (err) { 2696 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2697 goto err_mr_table_free; 2698 } 2699 err = mlx4_config_mad_demux(dev); 2700 if (err) { 2701 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2702 goto err_mcg_table_free; 2703 } 2704 } 2705 2706 err = mlx4_init_eq_table(dev); 2707 if (err) { 2708 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2709 goto err_mcg_table_free; 2710 } 2711 2712 err = mlx4_cmd_use_events(dev); 2713 if (err) { 2714 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2715 goto err_eq_table_free; 2716 } 2717 2718 err = mlx4_NOP(dev); 2719 if (err) { 2720 if (dev->flags & MLX4_FLAG_MSI_X) { 2721 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2722 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2723 mlx4_warn(dev, "Trying again without MSI-X\n"); 2724 } else { 2725 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2726 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2727 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2728 } 2729 2730 goto err_cmd_poll; 2731 } 2732 2733 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2734 2735 err = mlx4_init_cq_table(dev); 2736 if (err) { 2737 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2738 goto err_cmd_poll; 2739 } 2740 2741 err = mlx4_init_srq_table(dev); 2742 if (err) { 2743 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2744 goto err_cq_table_free; 2745 } 2746 2747 err = mlx4_init_qp_table(dev); 2748 if (err) { 2749 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2750 goto err_srq_table_free; 2751 } 2752 2753 if (!mlx4_is_slave(dev)) { 2754 err = mlx4_init_counters_table(dev); 2755 if (err && err != -ENOENT) { 2756 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2757 goto err_qp_table_free; 2758 } 2759 } 2760 2761 err = mlx4_allocate_default_counters(dev); 2762 if (err) { 2763 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); 2764 goto err_counters_table_free; 2765 } 2766 2767 if (!mlx4_is_slave(dev)) { 2768 for (port = 1; port <= dev->caps.num_ports; port++) { 2769 ib_port_default_caps = 0; 2770 err = mlx4_get_port_ib_caps(dev, port, 2771 &ib_port_default_caps); 2772 if (err) 2773 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2774 port, err); 2775 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2776 2777 /* initialize per-slave default ib port capabilities */ 2778 if (mlx4_is_master(dev)) { 2779 int i; 2780 for (i = 0; i < dev->num_slaves; i++) { 2781 if (i == mlx4_master_func_num(dev)) 2782 continue; 2783 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2784 ib_port_default_caps; 2785 } 2786 } 2787 2788 if (mlx4_is_mfunc(dev)) 2789 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2790 else 2791 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2792 2793 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2794 dev->caps.pkey_table_len[port] : -1); 2795 if (err) { 2796 mlx4_err(dev, "Failed to set port %d, aborting\n", 2797 port); 2798 goto err_default_countes_free; 2799 } 2800 } 2801 } 2802 2803 return 0; 2804 2805 err_default_countes_free: 2806 mlx4_cleanup_default_counters(dev); 2807 2808 err_counters_table_free: 2809 if (!mlx4_is_slave(dev)) 2810 mlx4_cleanup_counters_table(dev); 2811 2812 err_qp_table_free: 2813 mlx4_cleanup_qp_table(dev); 2814 2815 err_srq_table_free: 2816 mlx4_cleanup_srq_table(dev); 2817 2818 err_cq_table_free: 2819 mlx4_cleanup_cq_table(dev); 2820 2821 err_cmd_poll: 2822 mlx4_cmd_use_polling(dev); 2823 2824 err_eq_table_free: 2825 mlx4_cleanup_eq_table(dev); 2826 2827 err_mcg_table_free: 2828 if (!mlx4_is_slave(dev)) 2829 mlx4_cleanup_mcg_table(dev); 2830 2831 err_mr_table_free: 2832 mlx4_cleanup_mr_table(dev); 2833 2834 err_xrcd_table_free: 2835 mlx4_cleanup_xrcd_table(dev); 2836 2837 err_pd_table_free: 2838 mlx4_cleanup_pd_table(dev); 2839 2840 err_kar_unmap: 2841 iounmap(priv->kar); 2842 2843 err_uar_free: 2844 mlx4_uar_free(dev, &priv->driver_uar); 2845 2846 err_uar_table_free: 2847 mlx4_cleanup_uar_table(dev); 2848 return err; 2849 } 2850 2851 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) 2852 { 2853 int requested_cpu = 0; 2854 struct mlx4_priv *priv = mlx4_priv(dev); 2855 struct mlx4_eq *eq; 2856 int off = 0; 2857 int i; 2858 2859 if (eqn > dev->caps.num_comp_vectors) 2860 return -EINVAL; 2861 2862 for (i = 1; i < port; i++) 2863 off += mlx4_get_eqs_per_port(dev, i); 2864 2865 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); 2866 2867 /* Meaning EQs are shared, and this call comes from the second port */ 2868 if (requested_cpu < 0) 2869 return 0; 2870 2871 eq = &priv->eq_table.eq[eqn]; 2872 2873 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL)) 2874 return -ENOMEM; 2875 2876 cpumask_set_cpu(requested_cpu, eq->affinity_mask); 2877 2878 return 0; 2879 } 2880 2881 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2882 { 2883 struct mlx4_priv *priv = mlx4_priv(dev); 2884 struct msix_entry *entries; 2885 int i; 2886 int port = 0; 2887 2888 if (msi_x) { 2889 int nreq = min3(dev->caps.num_ports * 2890 (int)num_online_cpus() + 1, 2891 dev->caps.num_eqs - dev->caps.reserved_eqs, 2892 MAX_MSIX); 2893 2894 entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); 2895 if (!entries) 2896 goto no_msi; 2897 2898 for (i = 0; i < nreq; ++i) 2899 entries[i].entry = i; 2900 2901 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2902 nreq); 2903 2904 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { 2905 kfree(entries); 2906 goto no_msi; 2907 } 2908 /* 1 is reserved for events (asyncrounous EQ) */ 2909 dev->caps.num_comp_vectors = nreq - 1; 2910 2911 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; 2912 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 2913 dev->caps.num_ports); 2914 2915 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 2916 if (i == MLX4_EQ_ASYNC) 2917 continue; 2918 2919 priv->eq_table.eq[i].irq = 2920 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 2921 2922 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { 2923 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2924 dev->caps.num_ports); 2925 /* We don't set affinity hint when there 2926 * aren't enough EQs 2927 */ 2928 } else { 2929 set_bit(port, 2930 priv->eq_table.eq[i].actv_ports.ports); 2931 if (mlx4_init_affinity_hint(dev, port + 1, i)) 2932 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", 2933 i); 2934 } 2935 /* We divide the Eqs evenly between the two ports. 2936 * (dev->caps.num_comp_vectors / dev->caps.num_ports) 2937 * refers to the number of Eqs per port 2938 * (i.e eqs_per_port). Theoretically, we would like to 2939 * write something like (i + 1) % eqs_per_port == 0. 2940 * However, since there's an asynchronous Eq, we have 2941 * to skip over it by comparing this condition to 2942 * !!((i + 1) > MLX4_EQ_ASYNC). 2943 */ 2944 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && 2945 ((i + 1) % 2946 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == 2947 !!((i + 1) > MLX4_EQ_ASYNC)) 2948 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, 2949 * everything is shared anyway. 2950 */ 2951 port++; 2952 } 2953 2954 dev->flags |= MLX4_FLAG_MSI_X; 2955 2956 kfree(entries); 2957 return; 2958 } 2959 2960 no_msi: 2961 dev->caps.num_comp_vectors = 1; 2962 2963 BUG_ON(MLX4_EQ_ASYNC >= 2); 2964 for (i = 0; i < 2; ++i) { 2965 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2966 if (i != MLX4_EQ_ASYNC) { 2967 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2968 dev->caps.num_ports); 2969 } 2970 } 2971 } 2972 2973 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2974 { 2975 struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); 2976 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2977 int err; 2978 2979 err = devlink_port_register(devlink, &info->devlink_port, port); 2980 if (err) 2981 return err; 2982 2983 info->dev = dev; 2984 info->port = port; 2985 if (!mlx4_is_slave(dev)) { 2986 mlx4_init_mac_table(dev, &info->mac_table); 2987 mlx4_init_vlan_table(dev, &info->vlan_table); 2988 mlx4_init_roce_gid_table(dev, &info->gid_table); 2989 info->base_qpn = mlx4_get_base_qpn(dev, port); 2990 } 2991 2992 sprintf(info->dev_name, "mlx4_port%d", port); 2993 info->port_attr.attr.name = info->dev_name; 2994 if (mlx4_is_mfunc(dev)) 2995 info->port_attr.attr.mode = S_IRUGO; 2996 else { 2997 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2998 info->port_attr.store = set_port_type; 2999 } 3000 info->port_attr.show = show_port_type; 3001 sysfs_attr_init(&info->port_attr.attr); 3002 3003 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 3004 if (err) { 3005 mlx4_err(dev, "Failed to create file for port %d\n", port); 3006 devlink_port_unregister(&info->devlink_port); 3007 info->port = -1; 3008 } 3009 3010 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 3011 info->port_mtu_attr.attr.name = info->dev_mtu_name; 3012 if (mlx4_is_mfunc(dev)) 3013 info->port_mtu_attr.attr.mode = S_IRUGO; 3014 else { 3015 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 3016 info->port_mtu_attr.store = set_port_ib_mtu; 3017 } 3018 info->port_mtu_attr.show = show_port_ib_mtu; 3019 sysfs_attr_init(&info->port_mtu_attr.attr); 3020 3021 err = device_create_file(&dev->persist->pdev->dev, 3022 &info->port_mtu_attr); 3023 if (err) { 3024 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 3025 device_remove_file(&info->dev->persist->pdev->dev, 3026 &info->port_attr); 3027 devlink_port_unregister(&info->devlink_port); 3028 info->port = -1; 3029 } 3030 3031 return err; 3032 } 3033 3034 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 3035 { 3036 if (info->port < 0) 3037 return; 3038 3039 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 3040 device_remove_file(&info->dev->persist->pdev->dev, 3041 &info->port_mtu_attr); 3042 devlink_port_unregister(&info->devlink_port); 3043 3044 #ifdef CONFIG_RFS_ACCEL 3045 free_irq_cpu_rmap(info->rmap); 3046 info->rmap = NULL; 3047 #endif 3048 } 3049 3050 static int mlx4_init_steering(struct mlx4_dev *dev) 3051 { 3052 struct mlx4_priv *priv = mlx4_priv(dev); 3053 int num_entries = dev->caps.num_ports; 3054 int i, j; 3055 3056 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 3057 if (!priv->steer) 3058 return -ENOMEM; 3059 3060 for (i = 0; i < num_entries; i++) 3061 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3062 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 3063 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 3064 } 3065 return 0; 3066 } 3067 3068 static void mlx4_clear_steering(struct mlx4_dev *dev) 3069 { 3070 struct mlx4_priv *priv = mlx4_priv(dev); 3071 struct mlx4_steer_index *entry, *tmp_entry; 3072 struct mlx4_promisc_qp *pqp, *tmp_pqp; 3073 int num_entries = dev->caps.num_ports; 3074 int i, j; 3075 3076 for (i = 0; i < num_entries; i++) { 3077 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3078 list_for_each_entry_safe(pqp, tmp_pqp, 3079 &priv->steer[i].promisc_qps[j], 3080 list) { 3081 list_del(&pqp->list); 3082 kfree(pqp); 3083 } 3084 list_for_each_entry_safe(entry, tmp_entry, 3085 &priv->steer[i].steer_entries[j], 3086 list) { 3087 list_del(&entry->list); 3088 list_for_each_entry_safe(pqp, tmp_pqp, 3089 &entry->duplicates, 3090 list) { 3091 list_del(&pqp->list); 3092 kfree(pqp); 3093 } 3094 kfree(entry); 3095 } 3096 } 3097 } 3098 kfree(priv->steer); 3099 } 3100 3101 static int extended_func_num(struct pci_dev *pdev) 3102 { 3103 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 3104 } 3105 3106 #define MLX4_OWNER_BASE 0x8069c 3107 #define MLX4_OWNER_SIZE 4 3108 3109 static int mlx4_get_ownership(struct mlx4_dev *dev) 3110 { 3111 void __iomem *owner; 3112 u32 ret; 3113 3114 if (pci_channel_offline(dev->persist->pdev)) 3115 return -EIO; 3116 3117 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3118 MLX4_OWNER_BASE, 3119 MLX4_OWNER_SIZE); 3120 if (!owner) { 3121 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3122 return -ENOMEM; 3123 } 3124 3125 ret = readl(owner); 3126 iounmap(owner); 3127 return (int) !!ret; 3128 } 3129 3130 static void mlx4_free_ownership(struct mlx4_dev *dev) 3131 { 3132 void __iomem *owner; 3133 3134 if (pci_channel_offline(dev->persist->pdev)) 3135 return; 3136 3137 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3138 MLX4_OWNER_BASE, 3139 MLX4_OWNER_SIZE); 3140 if (!owner) { 3141 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3142 return; 3143 } 3144 writel(0, owner); 3145 msleep(1000); 3146 iounmap(owner); 3147 } 3148 3149 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 3150 !!((flags) & MLX4_FLAG_MASTER)) 3151 3152 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 3153 u8 total_vfs, int existing_vfs, int reset_flow) 3154 { 3155 u64 dev_flags = dev->flags; 3156 int err = 0; 3157 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), 3158 MLX4_MAX_NUM_VF); 3159 3160 if (reset_flow) { 3161 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 3162 GFP_KERNEL); 3163 if (!dev->dev_vfs) 3164 goto free_mem; 3165 return dev_flags; 3166 } 3167 3168 atomic_inc(&pf_loading); 3169 if (dev->flags & MLX4_FLAG_SRIOV) { 3170 if (existing_vfs != total_vfs) { 3171 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 3172 existing_vfs, total_vfs); 3173 total_vfs = existing_vfs; 3174 } 3175 } 3176 3177 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 3178 if (NULL == dev->dev_vfs) { 3179 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 3180 goto disable_sriov; 3181 } 3182 3183 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 3184 if (total_vfs > fw_enabled_sriov_vfs) { 3185 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n", 3186 total_vfs, fw_enabled_sriov_vfs); 3187 err = -ENOMEM; 3188 goto disable_sriov; 3189 } 3190 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 3191 err = pci_enable_sriov(pdev, total_vfs); 3192 } 3193 if (err) { 3194 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 3195 err); 3196 goto disable_sriov; 3197 } else { 3198 mlx4_warn(dev, "Running in master mode\n"); 3199 dev_flags |= MLX4_FLAG_SRIOV | 3200 MLX4_FLAG_MASTER; 3201 dev_flags &= ~MLX4_FLAG_SLAVE; 3202 dev->persist->num_vfs = total_vfs; 3203 } 3204 return dev_flags; 3205 3206 disable_sriov: 3207 atomic_dec(&pf_loading); 3208 free_mem: 3209 dev->persist->num_vfs = 0; 3210 kfree(dev->dev_vfs); 3211 dev->dev_vfs = NULL; 3212 return dev_flags & ~MLX4_FLAG_MASTER; 3213 } 3214 3215 enum { 3216 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 3217 }; 3218 3219 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 3220 int *nvfs) 3221 { 3222 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 3223 /* Checking for 64 VFs as a limitation of CX2 */ 3224 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 3225 requested_vfs >= 64) { 3226 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 3227 requested_vfs); 3228 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 3229 } 3230 return 0; 3231 } 3232 3233 static int mlx4_pci_enable_device(struct mlx4_dev *dev) 3234 { 3235 struct pci_dev *pdev = dev->persist->pdev; 3236 int err = 0; 3237 3238 mutex_lock(&dev->persist->pci_status_mutex); 3239 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { 3240 err = pci_enable_device(pdev); 3241 if (!err) 3242 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; 3243 } 3244 mutex_unlock(&dev->persist->pci_status_mutex); 3245 3246 return err; 3247 } 3248 3249 static void mlx4_pci_disable_device(struct mlx4_dev *dev) 3250 { 3251 struct pci_dev *pdev = dev->persist->pdev; 3252 3253 mutex_lock(&dev->persist->pci_status_mutex); 3254 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { 3255 pci_disable_device(pdev); 3256 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; 3257 } 3258 mutex_unlock(&dev->persist->pci_status_mutex); 3259 } 3260 3261 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3262 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3263 int reset_flow) 3264 { 3265 struct mlx4_dev *dev; 3266 unsigned sum = 0; 3267 int err; 3268 int port; 3269 int i; 3270 struct mlx4_dev_cap *dev_cap = NULL; 3271 int existing_vfs = 0; 3272 3273 dev = &priv->dev; 3274 3275 INIT_LIST_HEAD(&priv->ctx_list); 3276 spin_lock_init(&priv->ctx_lock); 3277 3278 mutex_init(&priv->port_mutex); 3279 mutex_init(&priv->bond_mutex); 3280 3281 INIT_LIST_HEAD(&priv->pgdir_list); 3282 mutex_init(&priv->pgdir_mutex); 3283 spin_lock_init(&priv->cmd.context_lock); 3284 3285 INIT_LIST_HEAD(&priv->bf_list); 3286 mutex_init(&priv->bf_mutex); 3287 3288 dev->rev_id = pdev->revision; 3289 dev->numa_node = dev_to_node(&pdev->dev); 3290 3291 /* Detect if this device is a virtual function */ 3292 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3293 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 3294 dev->flags |= MLX4_FLAG_SLAVE; 3295 } else { 3296 /* We reset the device and enable SRIOV only for physical 3297 * devices. Try to claim ownership on the device; 3298 * if already taken, skip -- do not allow multiple PFs */ 3299 err = mlx4_get_ownership(dev); 3300 if (err) { 3301 if (err < 0) 3302 return err; 3303 else { 3304 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 3305 return -EINVAL; 3306 } 3307 } 3308 3309 atomic_set(&priv->opreq_count, 0); 3310 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 3311 3312 /* 3313 * Now reset the HCA before we touch the PCI capabilities or 3314 * attempt a firmware command, since a boot ROM may have left 3315 * the HCA in an undefined state. 3316 */ 3317 err = mlx4_reset(dev); 3318 if (err) { 3319 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 3320 goto err_sriov; 3321 } 3322 3323 if (total_vfs) { 3324 dev->flags = MLX4_FLAG_MASTER; 3325 existing_vfs = pci_num_vf(pdev); 3326 if (existing_vfs) 3327 dev->flags |= MLX4_FLAG_SRIOV; 3328 dev->persist->num_vfs = total_vfs; 3329 } 3330 } 3331 3332 /* on load remove any previous indication of internal error, 3333 * device is up. 3334 */ 3335 dev->persist->state = MLX4_DEVICE_STATE_UP; 3336 3337 slave_start: 3338 err = mlx4_cmd_init(dev); 3339 if (err) { 3340 mlx4_err(dev, "Failed to init command interface, aborting\n"); 3341 goto err_sriov; 3342 } 3343 3344 /* In slave functions, the communication channel must be initialized 3345 * before posting commands. Also, init num_slaves before calling 3346 * mlx4_init_hca */ 3347 if (mlx4_is_mfunc(dev)) { 3348 if (mlx4_is_master(dev)) { 3349 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 3350 3351 } else { 3352 dev->num_slaves = 0; 3353 err = mlx4_multi_func_init(dev); 3354 if (err) { 3355 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 3356 goto err_cmd; 3357 } 3358 } 3359 } 3360 3361 err = mlx4_init_fw(dev); 3362 if (err) { 3363 mlx4_err(dev, "Failed to init fw, aborting.\n"); 3364 goto err_mfunc; 3365 } 3366 3367 if (mlx4_is_master(dev)) { 3368 /* when we hit the goto slave_start below, dev_cap already initialized */ 3369 if (!dev_cap) { 3370 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 3371 3372 if (!dev_cap) { 3373 err = -ENOMEM; 3374 goto err_fw; 3375 } 3376 3377 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3378 if (err) { 3379 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3380 goto err_fw; 3381 } 3382 3383 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3384 goto err_fw; 3385 3386 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3387 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 3388 total_vfs, 3389 existing_vfs, 3390 reset_flow); 3391 3392 mlx4_close_fw(dev); 3393 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3394 dev->flags = dev_flags; 3395 if (!SRIOV_VALID_STATE(dev->flags)) { 3396 mlx4_err(dev, "Invalid SRIOV state\n"); 3397 goto err_sriov; 3398 } 3399 err = mlx4_reset(dev); 3400 if (err) { 3401 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 3402 goto err_sriov; 3403 } 3404 goto slave_start; 3405 } 3406 } else { 3407 /* Legacy mode FW requires SRIOV to be enabled before 3408 * doing QUERY_DEV_CAP, since max_eq's value is different if 3409 * SRIOV is enabled. 3410 */ 3411 memset(dev_cap, 0, sizeof(*dev_cap)); 3412 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3413 if (err) { 3414 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3415 goto err_fw; 3416 } 3417 3418 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3419 goto err_fw; 3420 } 3421 } 3422 3423 err = mlx4_init_hca(dev); 3424 if (err) { 3425 if (err == -EACCES) { 3426 /* Not primary Physical function 3427 * Running in slave mode */ 3428 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3429 /* We're not a PF */ 3430 if (dev->flags & MLX4_FLAG_SRIOV) { 3431 if (!existing_vfs) 3432 pci_disable_sriov(pdev); 3433 if (mlx4_is_master(dev) && !reset_flow) 3434 atomic_dec(&pf_loading); 3435 dev->flags &= ~MLX4_FLAG_SRIOV; 3436 } 3437 if (!mlx4_is_slave(dev)) 3438 mlx4_free_ownership(dev); 3439 dev->flags |= MLX4_FLAG_SLAVE; 3440 dev->flags &= ~MLX4_FLAG_MASTER; 3441 goto slave_start; 3442 } else 3443 goto err_fw; 3444 } 3445 3446 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3447 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 3448 existing_vfs, reset_flow); 3449 3450 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 3451 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 3452 dev->flags = dev_flags; 3453 err = mlx4_cmd_init(dev); 3454 if (err) { 3455 /* Only VHCR is cleaned up, so could still 3456 * send FW commands 3457 */ 3458 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 3459 goto err_close; 3460 } 3461 } else { 3462 dev->flags = dev_flags; 3463 } 3464 3465 if (!SRIOV_VALID_STATE(dev->flags)) { 3466 mlx4_err(dev, "Invalid SRIOV state\n"); 3467 goto err_close; 3468 } 3469 } 3470 3471 /* check if the device is functioning at its maximum possible speed. 3472 * No return code for this call, just warn the user in case of PCI 3473 * express device capabilities are under-satisfied by the bus. 3474 */ 3475 if (!mlx4_is_slave(dev)) 3476 mlx4_check_pcie_caps(dev); 3477 3478 /* In master functions, the communication channel must be initialized 3479 * after obtaining its address from fw */ 3480 if (mlx4_is_master(dev)) { 3481 if (dev->caps.num_ports < 2 && 3482 num_vfs_argc > 1) { 3483 err = -EINVAL; 3484 mlx4_err(dev, 3485 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 3486 dev->caps.num_ports); 3487 goto err_close; 3488 } 3489 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 3490 3491 for (i = 0; 3492 i < sizeof(dev->persist->nvfs)/ 3493 sizeof(dev->persist->nvfs[0]); i++) { 3494 unsigned j; 3495 3496 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 3497 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 3498 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 3499 dev->caps.num_ports; 3500 } 3501 } 3502 3503 /* In master functions, the communication channel 3504 * must be initialized after obtaining its address from fw 3505 */ 3506 err = mlx4_multi_func_init(dev); 3507 if (err) { 3508 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 3509 goto err_close; 3510 } 3511 } 3512 3513 err = mlx4_alloc_eq_table(dev); 3514 if (err) 3515 goto err_master_mfunc; 3516 3517 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); 3518 mutex_init(&priv->msix_ctl.pool_lock); 3519 3520 mlx4_enable_msi_x(dev); 3521 if ((mlx4_is_mfunc(dev)) && 3522 !(dev->flags & MLX4_FLAG_MSI_X)) { 3523 err = -EOPNOTSUPP; 3524 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 3525 goto err_free_eq; 3526 } 3527 3528 if (!mlx4_is_slave(dev)) { 3529 err = mlx4_init_steering(dev); 3530 if (err) 3531 goto err_disable_msix; 3532 } 3533 3534 mlx4_init_quotas(dev); 3535 3536 err = mlx4_setup_hca(dev); 3537 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 3538 !mlx4_is_mfunc(dev)) { 3539 dev->flags &= ~MLX4_FLAG_MSI_X; 3540 dev->caps.num_comp_vectors = 1; 3541 pci_disable_msix(pdev); 3542 err = mlx4_setup_hca(dev); 3543 } 3544 3545 if (err) 3546 goto err_steer; 3547 3548 /* When PF resources are ready arm its comm channel to enable 3549 * getting commands 3550 */ 3551 if (mlx4_is_master(dev)) { 3552 err = mlx4_ARM_COMM_CHANNEL(dev); 3553 if (err) { 3554 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3555 err); 3556 goto err_steer; 3557 } 3558 } 3559 3560 for (port = 1; port <= dev->caps.num_ports; port++) { 3561 err = mlx4_init_port_info(dev, port); 3562 if (err) 3563 goto err_port; 3564 } 3565 3566 priv->v2p.port1 = 1; 3567 priv->v2p.port2 = 2; 3568 3569 err = mlx4_register_device(dev); 3570 if (err) 3571 goto err_port; 3572 3573 mlx4_request_modules(dev); 3574 3575 mlx4_sense_init(dev); 3576 mlx4_start_sense(dev); 3577 3578 priv->removed = 0; 3579 3580 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3581 atomic_dec(&pf_loading); 3582 3583 kfree(dev_cap); 3584 return 0; 3585 3586 err_port: 3587 for (--port; port >= 1; --port) 3588 mlx4_cleanup_port_info(&priv->port[port]); 3589 3590 mlx4_cleanup_default_counters(dev); 3591 if (!mlx4_is_slave(dev)) 3592 mlx4_cleanup_counters_table(dev); 3593 mlx4_cleanup_qp_table(dev); 3594 mlx4_cleanup_srq_table(dev); 3595 mlx4_cleanup_cq_table(dev); 3596 mlx4_cmd_use_polling(dev); 3597 mlx4_cleanup_eq_table(dev); 3598 mlx4_cleanup_mcg_table(dev); 3599 mlx4_cleanup_mr_table(dev); 3600 mlx4_cleanup_xrcd_table(dev); 3601 mlx4_cleanup_pd_table(dev); 3602 mlx4_cleanup_uar_table(dev); 3603 3604 err_steer: 3605 if (!mlx4_is_slave(dev)) 3606 mlx4_clear_steering(dev); 3607 3608 err_disable_msix: 3609 if (dev->flags & MLX4_FLAG_MSI_X) 3610 pci_disable_msix(pdev); 3611 3612 err_free_eq: 3613 mlx4_free_eq_table(dev); 3614 3615 err_master_mfunc: 3616 if (mlx4_is_master(dev)) { 3617 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3618 mlx4_multi_func_cleanup(dev); 3619 } 3620 3621 if (mlx4_is_slave(dev)) 3622 mlx4_slave_destroy_special_qp_cap(dev); 3623 3624 err_close: 3625 mlx4_close_hca(dev); 3626 3627 err_fw: 3628 mlx4_close_fw(dev); 3629 3630 err_mfunc: 3631 if (mlx4_is_slave(dev)) 3632 mlx4_multi_func_cleanup(dev); 3633 3634 err_cmd: 3635 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3636 3637 err_sriov: 3638 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3639 pci_disable_sriov(pdev); 3640 dev->flags &= ~MLX4_FLAG_SRIOV; 3641 } 3642 3643 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3644 atomic_dec(&pf_loading); 3645 3646 kfree(priv->dev.dev_vfs); 3647 3648 if (!mlx4_is_slave(dev)) 3649 mlx4_free_ownership(dev); 3650 3651 kfree(dev_cap); 3652 return err; 3653 } 3654 3655 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3656 struct mlx4_priv *priv) 3657 { 3658 int err; 3659 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3660 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3661 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3662 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3663 unsigned total_vfs = 0; 3664 unsigned int i; 3665 3666 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3667 3668 err = mlx4_pci_enable_device(&priv->dev); 3669 if (err) { 3670 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3671 return err; 3672 } 3673 3674 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 3675 * per port, we must limit the number of VFs to 63 (since their are 3676 * 128 MACs) 3677 */ 3678 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3679 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3680 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3681 if (nvfs[i] < 0) { 3682 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3683 err = -EINVAL; 3684 goto err_disable_pdev; 3685 } 3686 } 3687 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3688 i++) { 3689 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3690 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3691 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3692 err = -EINVAL; 3693 goto err_disable_pdev; 3694 } 3695 } 3696 if (total_vfs > MLX4_MAX_NUM_VF) { 3697 dev_err(&pdev->dev, 3698 "Requested more VF's (%d) than allowed by hw (%d)\n", 3699 total_vfs, MLX4_MAX_NUM_VF); 3700 err = -EINVAL; 3701 goto err_disable_pdev; 3702 } 3703 3704 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3705 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { 3706 dev_err(&pdev->dev, 3707 "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n", 3708 nvfs[i] + nvfs[2], i + 1, 3709 MLX4_MAX_NUM_VF_P_PORT); 3710 err = -EINVAL; 3711 goto err_disable_pdev; 3712 } 3713 } 3714 3715 /* Check for BARs. */ 3716 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3717 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3718 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3719 pci_dev_data, pci_resource_flags(pdev, 0)); 3720 err = -ENODEV; 3721 goto err_disable_pdev; 3722 } 3723 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3724 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3725 err = -ENODEV; 3726 goto err_disable_pdev; 3727 } 3728 3729 err = pci_request_regions(pdev, DRV_NAME); 3730 if (err) { 3731 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3732 goto err_disable_pdev; 3733 } 3734 3735 pci_set_master(pdev); 3736 3737 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3738 if (err) { 3739 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3740 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3741 if (err) { 3742 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3743 goto err_release_regions; 3744 } 3745 } 3746 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3747 if (err) { 3748 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3749 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3750 if (err) { 3751 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3752 goto err_release_regions; 3753 } 3754 } 3755 3756 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3757 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3758 /* Detect if this device is a virtual function */ 3759 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3760 /* When acting as pf, we normally skip vfs unless explicitly 3761 * requested to probe them. 3762 */ 3763 if (total_vfs) { 3764 unsigned vfs_offset = 0; 3765 3766 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3767 vfs_offset + nvfs[i] < extended_func_num(pdev); 3768 vfs_offset += nvfs[i], i++) 3769 ; 3770 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3771 err = -ENODEV; 3772 goto err_release_regions; 3773 } 3774 if ((extended_func_num(pdev) - vfs_offset) 3775 > prb_vf[i]) { 3776 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3777 extended_func_num(pdev)); 3778 err = -ENODEV; 3779 goto err_release_regions; 3780 } 3781 } 3782 } 3783 3784 err = mlx4_catas_init(&priv->dev); 3785 if (err) 3786 goto err_release_regions; 3787 3788 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3789 if (err) 3790 goto err_catas; 3791 3792 return 0; 3793 3794 err_catas: 3795 mlx4_catas_end(&priv->dev); 3796 3797 err_release_regions: 3798 pci_release_regions(pdev); 3799 3800 err_disable_pdev: 3801 mlx4_pci_disable_device(&priv->dev); 3802 return err; 3803 } 3804 3805 static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, 3806 enum devlink_port_type port_type) 3807 { 3808 struct mlx4_port_info *info = container_of(devlink_port, 3809 struct mlx4_port_info, 3810 devlink_port); 3811 enum mlx4_port_type mlx4_port_type; 3812 3813 switch (port_type) { 3814 case DEVLINK_PORT_TYPE_AUTO: 3815 mlx4_port_type = MLX4_PORT_TYPE_AUTO; 3816 break; 3817 case DEVLINK_PORT_TYPE_ETH: 3818 mlx4_port_type = MLX4_PORT_TYPE_ETH; 3819 break; 3820 case DEVLINK_PORT_TYPE_IB: 3821 mlx4_port_type = MLX4_PORT_TYPE_IB; 3822 break; 3823 default: 3824 return -EOPNOTSUPP; 3825 } 3826 3827 return __set_port_type(info, mlx4_port_type); 3828 } 3829 3830 static const struct devlink_ops mlx4_devlink_ops = { 3831 .port_type_set = mlx4_devlink_port_type_set, 3832 }; 3833 3834 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3835 { 3836 struct devlink *devlink; 3837 struct mlx4_priv *priv; 3838 struct mlx4_dev *dev; 3839 int ret; 3840 3841 printk_once(KERN_INFO "%s", mlx4_version); 3842 3843 devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv)); 3844 if (!devlink) 3845 return -ENOMEM; 3846 priv = devlink_priv(devlink); 3847 3848 dev = &priv->dev; 3849 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3850 if (!dev->persist) { 3851 ret = -ENOMEM; 3852 goto err_devlink_free; 3853 } 3854 dev->persist->pdev = pdev; 3855 dev->persist->dev = dev; 3856 pci_set_drvdata(pdev, dev->persist); 3857 priv->pci_dev_data = id->driver_data; 3858 mutex_init(&dev->persist->device_state_mutex); 3859 mutex_init(&dev->persist->interface_state_mutex); 3860 mutex_init(&dev->persist->pci_status_mutex); 3861 3862 ret = devlink_register(devlink, &pdev->dev); 3863 if (ret) 3864 goto err_persist_free; 3865 3866 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3867 if (ret) 3868 goto err_devlink_unregister; 3869 3870 pci_save_state(pdev); 3871 return 0; 3872 3873 err_devlink_unregister: 3874 devlink_unregister(devlink); 3875 err_persist_free: 3876 kfree(dev->persist); 3877 err_devlink_free: 3878 devlink_free(devlink); 3879 return ret; 3880 } 3881 3882 static void mlx4_clean_dev(struct mlx4_dev *dev) 3883 { 3884 struct mlx4_dev_persistent *persist = dev->persist; 3885 struct mlx4_priv *priv = mlx4_priv(dev); 3886 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3887 3888 memset(priv, 0, sizeof(*priv)); 3889 priv->dev.persist = persist; 3890 priv->dev.flags = flags; 3891 } 3892 3893 static void mlx4_unload_one(struct pci_dev *pdev) 3894 { 3895 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3896 struct mlx4_dev *dev = persist->dev; 3897 struct mlx4_priv *priv = mlx4_priv(dev); 3898 int pci_dev_data; 3899 int p, i; 3900 3901 if (priv->removed) 3902 return; 3903 3904 /* saving current ports type for further use */ 3905 for (i = 0; i < dev->caps.num_ports; i++) { 3906 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3907 dev->persist->curr_port_poss_type[i] = dev->caps. 3908 possible_type[i + 1]; 3909 } 3910 3911 pci_dev_data = priv->pci_dev_data; 3912 3913 mlx4_stop_sense(dev); 3914 mlx4_unregister_device(dev); 3915 3916 for (p = 1; p <= dev->caps.num_ports; p++) { 3917 mlx4_cleanup_port_info(&priv->port[p]); 3918 mlx4_CLOSE_PORT(dev, p); 3919 } 3920 3921 if (mlx4_is_master(dev)) 3922 mlx4_free_resource_tracker(dev, 3923 RES_TR_FREE_SLAVES_ONLY); 3924 3925 mlx4_cleanup_default_counters(dev); 3926 if (!mlx4_is_slave(dev)) 3927 mlx4_cleanup_counters_table(dev); 3928 mlx4_cleanup_qp_table(dev); 3929 mlx4_cleanup_srq_table(dev); 3930 mlx4_cleanup_cq_table(dev); 3931 mlx4_cmd_use_polling(dev); 3932 mlx4_cleanup_eq_table(dev); 3933 mlx4_cleanup_mcg_table(dev); 3934 mlx4_cleanup_mr_table(dev); 3935 mlx4_cleanup_xrcd_table(dev); 3936 mlx4_cleanup_pd_table(dev); 3937 3938 if (mlx4_is_master(dev)) 3939 mlx4_free_resource_tracker(dev, 3940 RES_TR_FREE_STRUCTS_ONLY); 3941 3942 iounmap(priv->kar); 3943 mlx4_uar_free(dev, &priv->driver_uar); 3944 mlx4_cleanup_uar_table(dev); 3945 if (!mlx4_is_slave(dev)) 3946 mlx4_clear_steering(dev); 3947 mlx4_free_eq_table(dev); 3948 if (mlx4_is_master(dev)) 3949 mlx4_multi_func_cleanup(dev); 3950 mlx4_close_hca(dev); 3951 mlx4_close_fw(dev); 3952 if (mlx4_is_slave(dev)) 3953 mlx4_multi_func_cleanup(dev); 3954 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3955 3956 if (dev->flags & MLX4_FLAG_MSI_X) 3957 pci_disable_msix(pdev); 3958 3959 if (!mlx4_is_slave(dev)) 3960 mlx4_free_ownership(dev); 3961 3962 mlx4_slave_destroy_special_qp_cap(dev); 3963 kfree(dev->dev_vfs); 3964 3965 mlx4_clean_dev(dev); 3966 priv->pci_dev_data = pci_dev_data; 3967 priv->removed = 1; 3968 } 3969 3970 static void mlx4_remove_one(struct pci_dev *pdev) 3971 { 3972 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3973 struct mlx4_dev *dev = persist->dev; 3974 struct mlx4_priv *priv = mlx4_priv(dev); 3975 struct devlink *devlink = priv_to_devlink(priv); 3976 int active_vfs = 0; 3977 3978 if (mlx4_is_slave(dev)) 3979 persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; 3980 3981 mutex_lock(&persist->interface_state_mutex); 3982 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3983 mutex_unlock(&persist->interface_state_mutex); 3984 3985 /* Disabling SR-IOV is not allowed while there are active vf's */ 3986 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3987 active_vfs = mlx4_how_many_lives_vf(dev); 3988 if (active_vfs) { 3989 pr_warn("Removing PF when there are active VF's !!\n"); 3990 pr_warn("Will not disable SR-IOV.\n"); 3991 } 3992 } 3993 3994 /* device marked to be under deletion running now without the lock 3995 * letting other tasks to be terminated 3996 */ 3997 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3998 mlx4_unload_one(pdev); 3999 else 4000 mlx4_info(dev, "%s: interface is down\n", __func__); 4001 mlx4_catas_end(dev); 4002 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 4003 mlx4_warn(dev, "Disabling SR-IOV\n"); 4004 pci_disable_sriov(pdev); 4005 } 4006 4007 pci_release_regions(pdev); 4008 mlx4_pci_disable_device(dev); 4009 devlink_unregister(devlink); 4010 kfree(dev->persist); 4011 devlink_free(devlink); 4012 } 4013 4014 static int restore_current_port_types(struct mlx4_dev *dev, 4015 enum mlx4_port_type *types, 4016 enum mlx4_port_type *poss_types) 4017 { 4018 struct mlx4_priv *priv = mlx4_priv(dev); 4019 int err, i; 4020 4021 mlx4_stop_sense(dev); 4022 4023 mutex_lock(&priv->port_mutex); 4024 for (i = 0; i < dev->caps.num_ports; i++) 4025 dev->caps.possible_type[i + 1] = poss_types[i]; 4026 err = mlx4_change_port_types(dev, types); 4027 mlx4_start_sense(dev); 4028 mutex_unlock(&priv->port_mutex); 4029 4030 return err; 4031 } 4032 4033 int mlx4_restart_one(struct pci_dev *pdev) 4034 { 4035 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4036 struct mlx4_dev *dev = persist->dev; 4037 struct mlx4_priv *priv = mlx4_priv(dev); 4038 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4039 int pci_dev_data, err, total_vfs; 4040 4041 pci_dev_data = priv->pci_dev_data; 4042 total_vfs = dev->persist->num_vfs; 4043 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4044 4045 mlx4_unload_one(pdev); 4046 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); 4047 if (err) { 4048 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 4049 __func__, pci_name(pdev), err); 4050 return err; 4051 } 4052 4053 err = restore_current_port_types(dev, dev->persist->curr_port_type, 4054 dev->persist->curr_port_poss_type); 4055 if (err) 4056 mlx4_err(dev, "could not restore original port types (%d)\n", 4057 err); 4058 4059 return err; 4060 } 4061 4062 #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT } 4063 #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF } 4064 #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 } 4065 4066 static const struct pci_device_id mlx4_pci_table[] = { 4067 /* MT25408 "Hermon" */ 4068 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */ 4069 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */ 4070 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */ 4071 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */ 4072 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */ 4073 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */ 4074 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */ 4075 /* MT25458 ConnectX EN 10GBASE-T */ 4076 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN), 4077 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */ 4078 /* MT26468 ConnectX EN 10GigE PCIe Gen2*/ 4079 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2), 4080 /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */ 4081 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2), 4082 /* MT26478 ConnectX2 40GigE PCIe Gen2 */ 4083 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2), 4084 /* MT25400 Family [ConnectX-2] */ 4085 MLX_VF(0x1002), /* Virtual Function */ 4086 /* MT27500 Family [ConnectX-3] */ 4087 MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3), 4088 MLX_VF(0x1004), /* Virtual Function */ 4089 MLX_GN(0x1005), /* MT27510 Family */ 4090 MLX_GN(0x1006), /* MT27511 Family */ 4091 MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */ 4092 MLX_GN(0x1008), /* MT27521 Family */ 4093 MLX_GN(0x1009), /* MT27530 Family */ 4094 MLX_GN(0x100a), /* MT27531 Family */ 4095 MLX_GN(0x100b), /* MT27540 Family */ 4096 MLX_GN(0x100c), /* MT27541 Family */ 4097 MLX_GN(0x100d), /* MT27550 Family */ 4098 MLX_GN(0x100e), /* MT27551 Family */ 4099 MLX_GN(0x100f), /* MT27560 Family */ 4100 MLX_GN(0x1010), /* MT27561 Family */ 4101 4102 /* 4103 * See the mellanox_check_broken_intx_masking() quirk when 4104 * adding devices 4105 */ 4106 4107 { 0, } 4108 }; 4109 4110 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 4111 4112 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 4113 pci_channel_state_t state) 4114 { 4115 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4116 4117 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 4118 mlx4_enter_error_state(persist); 4119 4120 mutex_lock(&persist->interface_state_mutex); 4121 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4122 mlx4_unload_one(pdev); 4123 4124 mutex_unlock(&persist->interface_state_mutex); 4125 if (state == pci_channel_io_perm_failure) 4126 return PCI_ERS_RESULT_DISCONNECT; 4127 4128 mlx4_pci_disable_device(persist->dev); 4129 return PCI_ERS_RESULT_NEED_RESET; 4130 } 4131 4132 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 4133 { 4134 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4135 struct mlx4_dev *dev = persist->dev; 4136 int err; 4137 4138 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 4139 err = mlx4_pci_enable_device(dev); 4140 if (err) { 4141 mlx4_err(dev, "Can not re-enable device, err=%d\n", err); 4142 return PCI_ERS_RESULT_DISCONNECT; 4143 } 4144 4145 pci_set_master(pdev); 4146 pci_restore_state(pdev); 4147 pci_save_state(pdev); 4148 return PCI_ERS_RESULT_RECOVERED; 4149 } 4150 4151 static void mlx4_pci_resume(struct pci_dev *pdev) 4152 { 4153 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4154 struct mlx4_dev *dev = persist->dev; 4155 struct mlx4_priv *priv = mlx4_priv(dev); 4156 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4157 int total_vfs; 4158 int err; 4159 4160 mlx4_err(dev, "%s was called\n", __func__); 4161 total_vfs = dev->persist->num_vfs; 4162 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4163 4164 mutex_lock(&persist->interface_state_mutex); 4165 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4166 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4167 priv, 1); 4168 if (err) { 4169 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", 4170 __func__, err); 4171 goto end; 4172 } 4173 4174 err = restore_current_port_types(dev, dev->persist-> 4175 curr_port_type, dev->persist-> 4176 curr_port_poss_type); 4177 if (err) 4178 mlx4_err(dev, "could not restore original port types (%d)\n", err); 4179 } 4180 end: 4181 mutex_unlock(&persist->interface_state_mutex); 4182 4183 } 4184 4185 static void mlx4_shutdown(struct pci_dev *pdev) 4186 { 4187 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4188 4189 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 4190 mutex_lock(&persist->interface_state_mutex); 4191 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4192 mlx4_unload_one(pdev); 4193 mutex_unlock(&persist->interface_state_mutex); 4194 } 4195 4196 static const struct pci_error_handlers mlx4_err_handler = { 4197 .error_detected = mlx4_pci_err_detected, 4198 .slot_reset = mlx4_pci_slot_reset, 4199 .resume = mlx4_pci_resume, 4200 }; 4201 4202 static struct pci_driver mlx4_driver = { 4203 .name = DRV_NAME, 4204 .id_table = mlx4_pci_table, 4205 .probe = mlx4_init_one, 4206 .shutdown = mlx4_shutdown, 4207 .remove = mlx4_remove_one, 4208 .err_handler = &mlx4_err_handler, 4209 }; 4210 4211 static int __init mlx4_verify_params(void) 4212 { 4213 if ((log_num_mac < 0) || (log_num_mac > 7)) { 4214 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 4215 return -1; 4216 } 4217 4218 if (log_num_vlan != 0) 4219 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 4220 MLX4_LOG_NUM_VLANS); 4221 4222 if (use_prio != 0) 4223 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 4224 4225 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 4226 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 4227 log_mtts_per_seg); 4228 return -1; 4229 } 4230 4231 /* Check if module param for ports type has legal combination */ 4232 if (port_type_array[0] == false && port_type_array[1] == true) { 4233 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 4234 port_type_array[0] = true; 4235 } 4236 4237 if (mlx4_log_num_mgm_entry_size < -7 || 4238 (mlx4_log_num_mgm_entry_size > 0 && 4239 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 4240 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 4241 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 4242 mlx4_log_num_mgm_entry_size, 4243 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 4244 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 4245 return -1; 4246 } 4247 4248 return 0; 4249 } 4250 4251 static int __init mlx4_init(void) 4252 { 4253 int ret; 4254 4255 if (mlx4_verify_params()) 4256 return -EINVAL; 4257 4258 4259 mlx4_wq = create_singlethread_workqueue("mlx4"); 4260 if (!mlx4_wq) 4261 return -ENOMEM; 4262 4263 ret = pci_register_driver(&mlx4_driver); 4264 if (ret < 0) 4265 destroy_workqueue(mlx4_wq); 4266 return ret < 0 ? ret : 0; 4267 } 4268 4269 static void __exit mlx4_cleanup(void) 4270 { 4271 pci_unregister_driver(&mlx4_driver); 4272 destroy_workqueue(mlx4_wq); 4273 } 4274 4275 module_init(mlx4_init); 4276 module_exit(mlx4_cleanup); 4277