1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/init.h>
39 #include <linux/errno.h>
40 #include <linux/pci.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/slab.h>
43 #include <linux/io-mapping.h>
44 #include <linux/delay.h>
45 #include <linux/kmod.h>
46 #include <linux/etherdevice.h>
47 #include <net/devlink.h>
48 
49 #include <uapi/rdma/mlx4-abi.h>
50 #include <linux/mlx4/device.h>
51 #include <linux/mlx4/doorbell.h>
52 
53 #include "mlx4.h"
54 #include "fw.h"
55 #include "icm.h"
56 
57 MODULE_AUTHOR("Roland Dreier");
58 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
59 MODULE_LICENSE("Dual BSD/GPL");
60 MODULE_VERSION(DRV_VERSION);
61 
62 struct workqueue_struct *mlx4_wq;
63 
64 #ifdef CONFIG_MLX4_DEBUG
65 
66 int mlx4_debug_level; /* 0 by default */
67 module_param_named(debug_level, mlx4_debug_level, int, 0644);
68 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
69 
70 #endif /* CONFIG_MLX4_DEBUG */
71 
72 #ifdef CONFIG_PCI_MSI
73 
74 static int msi_x = 1;
75 module_param(msi_x, int, 0444);
76 MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x");
77 
78 #else /* CONFIG_PCI_MSI */
79 
80 #define msi_x (0)
81 
82 #endif /* CONFIG_PCI_MSI */
83 
84 static uint8_t num_vfs[3] = {0, 0, 0};
85 static int num_vfs_argc;
86 module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
87 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
88 			  "num_vfs=port1,port2,port1+2");
89 
90 static uint8_t probe_vf[3] = {0, 0, 0};
91 static int probe_vfs_argc;
92 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
93 MODULE_PARM_DESC(probe_vf, "number of VFs to probe by the PF driver (num_vfs > 0)\n"
94 			   "probe_vf=port1,port2,port1+2");
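/* Illustrative usage (hypothetical values): enable 4 VFs on port 1,
 * 2 on port 2 and 1 dual-port VF, and let the PF probe one port 1 VF:
 *
 *   modprobe mlx4_core num_vfs=4,2,1 probe_vf=1,0,0
 */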
95 
96 static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
97 module_param_named(log_num_mgm_entry_size,
98 			mlx4_log_num_mgm_entry_size, int, 0444);
99 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines"
100 					 " the number of QPs per MCG, for"
101 					 " example: 10 gives 248. Range: 7 <="
102 					 " log_num_mgm_entry_size <= 12."
103 					 " To activate device managed"
104 					 " flow steering when available, set to -1");
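/* A minimal worked example, using the MGM entry layout assumed by
 * slave_adjust_steering_mode() below: log_num_mgm_entry_size = 10 gives
 * a 1024-byte entry, i.e. 4 * (1024 / 16 - 2) = 248 QPs per MCG.
 */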
105 
106 static bool enable_64b_cqe_eqe = true;
107 module_param(enable_64b_cqe_eqe, bool, 0444);
108 MODULE_PARM_DESC(enable_64b_cqe_eqe,
109 		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
110 
111 static bool enable_4k_uar;
112 module_param(enable_4k_uar, bool, 0444);
113 MODULE_PARM_DESC(enable_4k_uar,
114 		 "Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");
115 
116 #define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
117 					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
118 					 MLX4_FUNC_CAP_DMFS_A0_STATIC)
119 
120 #define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)
121 
122 static char mlx4_version[] =
123 	DRV_NAME ": Mellanox ConnectX core driver v"
124 	DRV_VERSION "\n";
125 
126 static const struct mlx4_profile default_profile = {
127 	.num_qp		= 1 << 18,
128 	.num_srq	= 1 << 16,
129 	.rdmarc_per_qp	= 1 << 4,
130 	.num_cq		= 1 << 16,
131 	.num_mcg	= 1 << 13,
132 	.num_mpt	= 1 << 19,
133 	.num_mtt	= 1 << 20, /* It is really the number of MTT segments */
134 };
135 
136 static const struct mlx4_profile low_mem_profile = {
137 	.num_qp		= 1 << 17,
138 	.num_srq	= 1 << 6,
139 	.rdmarc_per_qp	= 1 << 4,
140 	.num_cq		= 1 << 8,
141 	.num_mcg	= 1 << 8,
142 	.num_mpt	= 1 << 9,
143 	.num_mtt	= 1 << 7,
144 };
145 
146 static int log_num_mac = 7;
147 module_param_named(log_num_mac, log_num_mac, int, 0444);
148 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
149 
150 static int log_num_vlan;
151 module_param_named(log_num_vlan, log_num_vlan, int, 0444);
152 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
153 /* Log2 max number of VLANs per ETH port (0-7) */
154 #define MLX4_LOG_NUM_VLANS 7
155 #define MLX4_MIN_LOG_NUM_VLANS 0
156 #define MLX4_MIN_LOG_NUM_MAC 1
157 
158 static bool use_prio;
159 module_param_named(use_prio, use_prio, bool, 0444);
160 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
161 
162 int log_mtts_per_seg = ilog2(1);
163 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
164 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
165 		 "(0-7) (default: 0)");
166 
167 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
168 static int arr_argc = 2;
169 module_param_array(port_type_array, int, &arr_argc, 0444);
170 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
171 				"1 for IB, 2 for Ethernet");
172 
173 struct mlx4_port_config {
174 	struct list_head list;
175 	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
176 	struct pci_dev *pdev;
177 };
178 
179 static atomic_t pf_loading = ATOMIC_INIT(0);
180 
181 static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
182 				       struct devlink_param_gset_ctx *ctx)
183 {
184 	ctx->val.vbool = !!mlx4_internal_err_reset;
185 	return 0;
186 }
187 
188 static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
189 				       struct devlink_param_gset_ctx *ctx)
190 {
191 	mlx4_internal_err_reset = ctx->val.vbool;
192 	return 0;
193 }
194 
195 static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
196 					    struct devlink_param_gset_ctx *ctx)
197 {
198 	struct mlx4_priv *priv = devlink_priv(devlink);
199 	struct mlx4_dev *dev = &priv->dev;
200 
201 	ctx->val.vbool = dev->persist->crdump.snapshot_enable;
202 	return 0;
203 }
204 
205 static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
206 					    struct devlink_param_gset_ctx *ctx)
207 {
208 	struct mlx4_priv *priv = devlink_priv(devlink);
209 	struct mlx4_dev *dev = &priv->dev;
210 
211 	dev->persist->crdump.snapshot_enable = ctx->val.vbool;
212 	return 0;
213 }
214 
215 static int
216 mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
217 			       union devlink_param_value val,
218 			       struct netlink_ext_ack *extack)
219 {
220 	u32 value = val.vu32;
221 
222 	if (value < 1 || value > 128)
223 		return -ERANGE;
224 
225 	if (!is_power_of_2(value)) {
226 		NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
227 		return -EINVAL;
228 	}
229 
230 	return 0;
231 }
232 
233 enum mlx4_devlink_param_id {
234 	MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
235 	MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
236 	MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
237 };
238 
239 static const struct devlink_param mlx4_devlink_params[] = {
240 	DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
241 			      BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
242 			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
243 			      mlx4_devlink_ierr_reset_get,
244 			      mlx4_devlink_ierr_reset_set, NULL),
245 	DEVLINK_PARAM_GENERIC(MAX_MACS,
246 			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
247 			      NULL, NULL, mlx4_devlink_max_macs_validate),
248 	DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
249 			      BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
250 			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
251 			      mlx4_devlink_crdump_snapshot_get,
252 			      mlx4_devlink_crdump_snapshot_set, NULL),
253 	DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
254 			     "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
255 			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
256 			     NULL, NULL, NULL),
257 	DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
258 			     "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
259 			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
260 			     NULL, NULL, NULL),
261 };
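/* The parameters above are driven via the devlink interface; a sketch of
 * the expected flow (the device address is illustrative):
 *
 *   devlink dev param set pci/0000:03:00.0 name max_macs value 64 \
 *           cmode driverinit
 *   devlink dev reload pci/0000:03:00.0
 *
 * driverinit values only take effect after the reload.
 */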
262 
263 static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
264 {
265 	union devlink_param_value value;
266 
267 	value.vbool = !!mlx4_internal_err_reset;
268 	devl_param_driverinit_value_set(devlink,
269 					DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
270 					value);
271 
272 	value.vu32 = 1UL << log_num_mac;
273 	devl_param_driverinit_value_set(devlink,
274 					DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
275 					value);
276 
277 	value.vbool = enable_64b_cqe_eqe;
278 	devl_param_driverinit_value_set(devlink,
279 					MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
280 					value);
281 
282 	value.vbool = enable_4k_uar;
283 	devl_param_driverinit_value_set(devlink,
284 					MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
285 					value);
286 
287 	value.vbool = false;
288 	devl_param_driverinit_value_set(devlink,
289 					DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
290 					value);
291 }
292 
293 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
294 					      struct mlx4_dev_cap *dev_cap)
295 {
296 	/* reserved_uars is expressed in units of the system page size.
297 	 * Therefore, an adjustment is needed when the UAR page size is
298 	 * smaller than the system page size.
299 	 */
300 	dev->caps.reserved_uars	=
301 		max_t(int,
302 		      mlx4_get_num_reserved_uar(dev),
303 		      dev_cap->reserved_uars /
304 			(1 << (PAGE_SHIFT - dev->uar_page_shift)));
305 }
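/* Example with hypothetical numbers: with 64K system pages (PAGE_SHIFT = 16)
 * and 4K UAR pages (uar_page_shift = 12), a firmware reservation of 32 is
 * scaled down by 1 << 4 = 16 to 2, and the driver-side floor returned by
 * mlx4_get_num_reserved_uar() then takes precedence.
 */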
306 
307 int mlx4_check_port_params(struct mlx4_dev *dev,
308 			   enum mlx4_port_type *port_type)
309 {
310 	int i;
311 
312 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
313 		for (i = 0; i < dev->caps.num_ports - 1; i++) {
314 			if (port_type[i] != port_type[i + 1]) {
315 				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
316 				return -EOPNOTSUPP;
317 			}
318 		}
319 	}
320 
321 	for (i = 0; i < dev->caps.num_ports; i++) {
322 		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
323 			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
324 				 i + 1);
325 			return -EOPNOTSUPP;
326 		}
327 	}
328 	return 0;
329 }
330 
331 static void mlx4_set_port_mask(struct mlx4_dev *dev)
332 {
333 	int i;
334 
335 	for (i = 1; i <= dev->caps.num_ports; ++i)
336 		dev->caps.port_mask[i] = dev->caps.port_type[i];
337 }
338 
339 enum {
340 	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
341 };
342 
343 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
344 {
345 	int err = 0;
346 	struct mlx4_func func;
347 
348 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
349 		err = mlx4_QUERY_FUNC(dev, &func, 0);
350 		if (err) {
351 			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
352 			return err;
353 		}
354 		dev_cap->max_eqs = func.max_eq;
355 		dev_cap->reserved_eqs = func.rsvd_eqs;
356 		dev_cap->reserved_uars = func.rsvd_uars;
357 		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
358 	}
359 	return err;
360 }
361 
362 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
363 {
364 	struct mlx4_caps *dev_cap = &dev->caps;
365 
366 	/* Stride not supported by FW, or cancelled by the user */
367 	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
368 	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
369 		return;
370 
371 	/* 64B CQEs/EQEs must be enabled by the FW to use the bigger stride.
372 	 * When the FW has NCSI it may decide not to report 64B CQEs/EQEs.
373 	 */
374 	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
375 	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
376 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
377 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
378 		return;
379 	}
380 
381 	if (cache_line_size() == 128 || cache_line_size() == 256) {
382 		mlx4_dbg(dev, "Enabling CQE stride, cacheline size supported\n");
383 		/* Changing the real data inside CQE size to 32B */
384 		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
385 		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
386 
387 		if (mlx4_is_master(dev))
388 			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
389 	} else {
390 		if (cache_line_size() != 32  && cache_line_size() != 64)
391 			mlx4_dbg(dev, "Disabling CQE stride, cacheline size unsupported\n");
392 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
393 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
394 	}
395 }
396 
397 static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
398 			  struct mlx4_port_cap *port_cap)
399 {
400 	dev->caps.vl_cap[port]	    = port_cap->max_vl;
401 	dev->caps.ib_mtu_cap[port]	    = port_cap->ib_mtu;
402 	dev->phys_caps.gid_phys_table_len[port]  = port_cap->max_gids;
403 	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
404 	/* set gid and pkey table operating lengths by default
405 	 * to non-sriov values
406 	 */
407 	dev->caps.gid_table_len[port]  = port_cap->max_gids;
408 	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
409 	dev->caps.port_width_cap[port] = port_cap->max_port_width;
410 	dev->caps.eth_mtu_cap[port]    = port_cap->eth_mtu;
411 	dev->caps.max_tc_eth	       = port_cap->max_tc_eth;
412 	dev->caps.def_mac[port]        = port_cap->def_mac;
413 	dev->caps.supported_type[port] = port_cap->supported_port_types;
414 	dev->caps.suggested_type[port] = port_cap->suggested_type;
415 	dev->caps.default_sense[port] = port_cap->default_sense;
416 	dev->caps.trans_type[port]	    = port_cap->trans_type;
417 	dev->caps.vendor_oui[port]     = port_cap->vendor_oui;
418 	dev->caps.wavelength[port]     = port_cap->wavelength;
419 	dev->caps.trans_code[port]     = port_cap->trans_code;
420 
421 	return 0;
422 }
423 
424 static int mlx4_dev_port(struct mlx4_dev *dev, int port,
425 			 struct mlx4_port_cap *port_cap)
426 {
427 	int err = 0;
428 
429 	err = mlx4_QUERY_PORT(dev, port, port_cap);
430 
431 	if (err)
432 		mlx4_err(dev, "QUERY_PORT command failed.\n");
433 
434 	return err;
435 }
436 
437 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
438 {
439 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
440 		return;
441 
442 	if (mlx4_is_mfunc(dev)) {
443 		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS\n");
444 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
445 		return;
446 	}
447 
448 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
449 		mlx4_dbg(dev,
450 			 "Keep FCS is not supported - Disabling Ignore FCS\n");
451 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
452 		return;
453 	}
454 }
455 
456 #define MLX4_A0_STEERING_TABLE_SIZE	256
457 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
458 {
459 	int err;
460 	int i;
461 
462 	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
463 	if (err) {
464 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
465 		return err;
466 	}
467 	mlx4_dev_cap_dump(dev, dev_cap);
468 
469 	if (dev_cap->min_page_sz > PAGE_SIZE) {
470 		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
471 			 dev_cap->min_page_sz, PAGE_SIZE);
472 		return -ENODEV;
473 	}
474 	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
475 		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
476 			 dev_cap->num_ports, MLX4_MAX_PORTS);
477 		return -ENODEV;
478 	}
479 
480 	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
481 		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
482 			 dev_cap->uar_size,
483 			 (unsigned long long)
484 			 pci_resource_len(dev->persist->pdev, 2));
485 		return -ENODEV;
486 	}
487 
488 	dev->caps.num_ports	     = dev_cap->num_ports;
489 	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
490 	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
491 				      dev->caps.num_sys_eqs :
492 				      MLX4_MAX_EQ_NUM;
493 	for (i = 1; i <= dev->caps.num_ports; ++i) {
494 		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
495 		if (err) {
496 			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
497 			return err;
498 		}
499 	}
500 
501 	dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
502 	dev->caps.uar_page_size	     = PAGE_SIZE;
503 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
504 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
505 	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
506 	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
507 	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
508 	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
509 	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
510 	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
511 	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
512 	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
513 	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
514 	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
515 	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
516 	/*
517 	 * Subtract 1 from the limit because we need to allocate a
518 	 * spare CQE to enable resizing the CQ.
519 	 */
520 	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
521 	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
522 	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
523 	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
524 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
525 
526 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
527 	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
528 					dev_cap->reserved_xrcds : 0;
529 	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
530 					dev_cap->max_xrcds : 0;
531 	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;
532 
533 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
534 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
535 	dev->caps.flags		     = dev_cap->flags;
536 	dev->caps.flags2	     = dev_cap->flags2;
537 	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
538 	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
539 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
540 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
541 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
542 	dev->caps.wol_port[1]          = dev_cap->wol_port[1];
543 	dev->caps.wol_port[2]          = dev_cap->wol_port[2];
544 	dev->caps.health_buffer_addrs  = dev_cap->health_buffer_addrs;
545 
546 	/* Save uar page shift */
547 	if (!mlx4_is_slave(dev)) {
548 		/* Virtual PCI function needs to determine UAR page size from
549 		 * firmware. Only master PCI function can set the uar page size
550 		 */
551 		if (enable_4k_uar || !dev->persist->num_vfs)
552 			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
553 		else
554 			dev->uar_page_shift = PAGE_SHIFT;
555 
556 		mlx4_set_num_reserved_uars(dev, dev_cap);
557 	}
558 
559 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
560 		struct mlx4_init_hca_param hca_param;
561 
562 		memset(&hca_param, 0, sizeof(hca_param));
563 		err = mlx4_QUERY_HCA(dev, &hca_param);
564 		/* Turn off the PHV_EN flag in case phv_check_en is set.
565 		 * phv_check_en is a HW check that parses the packet and verifies
566 		 * that the phv bit was reported correctly in the wqe. To allow QinQ,
567 		 * the PHV_EN flag should be set and phv_check_en must be cleared,
568 		 * otherwise QinQ packets will be dropped by the HW.
569 		 */
570 		if (err || hca_param.phv_check_en)
571 			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
572 	}
573 
574 	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
575 	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
576 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
577 	/* Don't do sense port on multifunction devices (for now at least) */
578 	if (mlx4_is_mfunc(dev))
579 		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
580 
581 	if (mlx4_low_memory_profile()) {
582 		dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
583 		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
584 	} else {
585 		dev->caps.log_num_macs  = log_num_mac;
586 		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
587 	}
588 
589 	for (i = 1; i <= dev->caps.num_ports; ++i) {
590 		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
591 		if (dev->caps.supported_type[i]) {
592 			/* if only ETH is supported - assign ETH */
593 			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
594 				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
595 			/* if only IB is supported, assign IB */
596 			else if (dev->caps.supported_type[i] ==
597 				 MLX4_PORT_TYPE_IB)
598 				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
599 			else {
600 				/* if IB and ETH are supported, we set the port
601 				 * type according to user selection of port type;
602 				 * if user selected none, take the FW hint */
603 				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
604 					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
605 						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
606 				else
607 					dev->caps.port_type[i] = port_type_array[i - 1];
608 			}
609 		}
610 		/*
611 		 * Link sensing is allowed on the port if 3 conditions are true:
612 		 * 1. Both protocols are supported on the port.
613 		 * 2. Different port types can coexist on the HCA (DPDP).
614 		 * 3. The FW declared that it supports link sensing.
615 		 */
616 		mlx4_priv(dev)->sense.sense_allowed[i] =
617 			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
618 			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
619 			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
620 
621 		/*
622 		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
623 		 * and perform a sense_port FW command to try to set the correct
624 		 * port type from the beginning
625 		 */
626 		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
627 			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
628 			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
629 			mlx4_SENSE_PORT(dev, i, &sensed_port);
630 			if (sensed_port != MLX4_PORT_TYPE_NONE)
631 				dev->caps.port_type[i] = sensed_port;
632 		} else {
633 			dev->caps.possible_type[i] = dev->caps.port_type[i];
634 		}
635 
636 		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
637 			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
638 			mlx4_warn(dev, "Requested number of MACs is too high for port %d, reducing to %d\n",
639 				  i, 1 << dev->caps.log_num_macs);
640 		}
641 		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
642 			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
643 			mlx4_warn(dev, "Requested number of VLANs is too high for port %d, reducing to %d\n",
644 				  i, 1 << dev->caps.log_num_vlans);
645 		}
646 	}
647 
648 	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
649 	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
650 	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
651 		mlx4_warn(dev,
652 			  "Granular QoS per VF not supported with IB/Eth configuration\n");
653 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
654 	}
655 
656 	dev->caps.max_counters = dev_cap->max_counters;
657 
658 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
659 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
660 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
661 		(1 << dev->caps.log_num_macs) *
662 		(1 << dev->caps.log_num_vlans) *
663 		dev->caps.num_ports;
664 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
665 
666 	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
667 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
668 		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
669 	else
670 		dev->caps.dmfs_high_rate_qpn_base =
671 			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
672 
673 	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
674 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
675 		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
676 		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
677 		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
678 	} else {
679 		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
680 		dev->caps.dmfs_high_rate_qpn_base =
681 			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
682 		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
683 	}
684 
685 	dev->caps.rl_caps = dev_cap->rl_caps;
686 
687 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
688 		dev->caps.dmfs_high_rate_qpn_range;
689 
690 	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
691 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
692 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
693 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
694 
695 	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
696 
697 	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
698 		if (dev_cap->flags &
699 		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
700 			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
701 			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
702 			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
703 		}
704 
705 		if (dev_cap->flags2 &
706 		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
707 		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
708 			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
709 			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
710 			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
711 		}
712 	}
713 
714 	if ((dev->caps.flags &
715 	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
716 	    mlx4_is_master(dev))
717 		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
718 
719 	if (!mlx4_is_slave(dev)) {
720 		mlx4_enable_cqe_eqe_stride(dev);
721 		dev->caps.alloc_res_qp_mask =
722 			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
723 			MLX4_RESERVE_A0_QP;
724 
725 		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
726 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
727 			mlx4_warn(dev, "Old device ETS support detected\n");
728 			mlx4_warn(dev, "Consider upgrading device FW.\n");
729 			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
730 		}
731 
732 	} else {
733 		dev->caps.alloc_res_qp_mask = 0;
734 	}
735 
736 	mlx4_enable_ignore_fcs(dev);
737 
738 	return 0;
739 }
740 
741 /* Check whether there are live VFs and return how many there are. */
742 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
743 {
744 	struct mlx4_priv *priv = mlx4_priv(dev);
745 	struct mlx4_slave_state *s_state;
746 	int i;
747 	int ret = 0;
748 
749 	for (i = 1 /* slave 0 is the PPF */; i < dev->num_slaves; ++i) {
750 		s_state = &priv->mfunc.master.slave_state[i];
751 		if (s_state->active && s_state->last_cmd !=
752 		    MLX4_COMM_CMD_RESET) {
753 			mlx4_warn(dev, "%s: slave: %d is still active\n",
754 				  __func__, i);
755 			ret++;
756 		}
757 	}
758 	return ret;
759 }
760 
761 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
762 {
763 	u32 qk = MLX4_RESERVED_QKEY_BASE;
764 
765 	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
766 	    qpn < dev->phys_caps.base_proxy_sqpn)
767 		return -EINVAL;
768 
769 	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
770 		/* tunnel qp */
771 		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
772 	else
773 		qk += qpn - dev->phys_caps.base_proxy_sqpn;
774 	*qkey = qk;
775 	return 0;
776 }
777 EXPORT_SYMBOL(mlx4_get_parav_qkey);
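/* Example with hypothetical bases: if base_proxy_sqpn = 0x400 and
 * base_tunnel_sqpn = 0x500, then proxy QPN 0x402 maps to
 * MLX4_RESERVED_QKEY_BASE + 2 and tunnel QPN 0x501 maps to
 * MLX4_RESERVED_QKEY_BASE + 1.
 */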
778 
779 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
780 {
781 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
782 
783 	if (!mlx4_is_master(dev))
784 		return;
785 
786 	priv->virt2phys_pkey[slave][port - 1][i] = val;
787 }
788 EXPORT_SYMBOL(mlx4_sync_pkey_table);
789 
790 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
791 {
792 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
793 
794 	if (!mlx4_is_master(dev))
795 		return;
796 
797 	priv->slave_node_guids[slave] = guid;
798 }
799 EXPORT_SYMBOL(mlx4_put_slave_node_guid);
800 
801 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
802 {
803 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
804 
805 	if (!mlx4_is_master(dev))
806 		return 0;
807 
808 	return priv->slave_node_guids[slave];
809 }
810 EXPORT_SYMBOL(mlx4_get_slave_node_guid);
811 
812 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
813 {
814 	struct mlx4_priv *priv = mlx4_priv(dev);
815 	struct mlx4_slave_state *s_slave;
816 
817 	if (!mlx4_is_master(dev))
818 		return 0;
819 
820 	s_slave = &priv->mfunc.master.slave_state[slave];
821 	return !!s_slave->active;
822 }
823 EXPORT_SYMBOL(mlx4_is_slave_active);
824 
825 void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
826 				       struct _rule_hw *eth_header)
827 {
828 	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
829 	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
830 		struct mlx4_net_trans_rule_hw_eth *eth =
831 			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
832 		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
833 		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
834 			next_rule->rsvd == 0;
835 
836 		if (last_rule)
837 			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
838 	}
839 }
840 EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
841 
842 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
843 				       struct mlx4_dev_cap *dev_cap,
844 				       struct mlx4_init_hca_param *hca_param)
845 {
846 	dev->caps.steering_mode = hca_param->steering_mode;
847 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
848 		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
849 		dev->caps.fs_log_max_ucast_qp_range_size =
850 			dev_cap->fs_log_max_ucast_qp_range_size;
851 	} else
852 		dev->caps.num_qp_per_mgm =
853 			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
854 
855 	mlx4_dbg(dev, "Steering mode is: %s\n",
856 		 mlx4_steering_mode_str(dev->caps.steering_mode));
857 }
858 
859 static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
860 {
861 	kfree(dev->caps.spec_qps);
862 	dev->caps.spec_qps = NULL;
863 }
864 
865 static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
866 {
867 	struct mlx4_func_cap *func_cap = NULL;
868 	struct mlx4_caps *caps = &dev->caps;
869 	int i, err = 0;
870 
871 	func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
872 	caps->spec_qps = kcalloc(caps->num_ports, sizeof(*caps->spec_qps), GFP_KERNEL);
873 
874 	if (!func_cap || !caps->spec_qps) {
875 		mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
876 		err = -ENOMEM;
877 		goto err_mem;
878 	}
879 
880 	for (i = 1; i <= caps->num_ports; ++i) {
881 		err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap);
882 		if (err) {
883 			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
884 				 i, err);
885 			goto err_mem;
886 		}
887 		caps->spec_qps[i - 1] = func_cap->spec_qps;
888 		caps->port_mask[i] = caps->port_type[i];
889 		caps->phys_port_id[i] = func_cap->phys_port_id;
890 		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
891 						      &caps->gid_table_len[i],
892 						      &caps->pkey_table_len[i]);
893 		if (err) {
894 			mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n",
895 				 i, err);
896 			goto err_mem;
897 		}
898 	}
899 
900 err_mem:
901 	if (err)
902 		mlx4_slave_destroy_special_qp_cap(dev);
903 	kfree(func_cap);
904 	return err;
905 }
906 
907 static int mlx4_slave_cap(struct mlx4_dev *dev)
908 {
909 	int			   err;
910 	u32			   page_size;
911 	struct mlx4_dev_cap	   *dev_cap = NULL;
912 	struct mlx4_func_cap	   *func_cap = NULL;
913 	struct mlx4_init_hca_param *hca_param = NULL;
914 
915 	hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL);
916 	func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
917 	dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
918 	if (!hca_param || !func_cap || !dev_cap) {
919 		mlx4_err(dev, "Failed to allocate memory for slave_cap\n");
920 		err = -ENOMEM;
921 		goto free_mem;
922 	}
923 
924 	err = mlx4_QUERY_HCA(dev, hca_param);
925 	if (err) {
926 		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
927 		goto free_mem;
928 	}
929 
930 	/* Fail if the HCA has an unknown global capability.
931 	 * At this time, global_caps should always be zero.
932 	 */
933 	if (hca_param->global_caps) {
934 		mlx4_err(dev, "Unknown hca global capabilities\n");
935 		err = -EINVAL;
936 		goto free_mem;
937 	}
938 
939 	dev->caps.hca_core_clock = hca_param->hca_core_clock;
940 
941 	dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp;
942 	err = mlx4_dev_cap(dev, dev_cap);
943 	if (err) {
944 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
945 		goto free_mem;
946 	}
947 
948 	err = mlx4_QUERY_FW(dev);
949 	if (err)
950 		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
951 
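	/* page_size_cap was set to ~(min_page_sz - 1) in mlx4_dev_cap(), so
	 * ~page_size_cap + 1 recovers min_page_sz: e.g. a 4K minimum gives
	 * cap 0xfffff000, and ~0xfffff000 + 1 = 0x1000.
	 */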
952 	page_size = ~dev->caps.page_size_cap + 1;
953 	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
954 	if (page_size > PAGE_SIZE) {
955 		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
956 			 page_size, PAGE_SIZE);
957 		err = -ENODEV;
958 		goto free_mem;
959 	}
960 
961 	/* Set uar_page_shift for VF */
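	/* uar_page_sz is the log2 of the UAR page size in 4K units
	 * (0 means 4K), hence the +12 below.
	 */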
962 	dev->uar_page_shift = hca_param->uar_page_sz + 12;
963 
964 	/* Make sure the master uar page size is valid */
965 	if (dev->uar_page_shift > PAGE_SHIFT) {
966 		mlx4_err(dev,
967 			 "Invalid configuration: uar page size is larger than system page size\n");
968 		err = -ENODEV;
969 		goto free_mem;
970 	}
971 
972 	/* Set reserved_uars based on the uar_page_shift */
973 	mlx4_set_num_reserved_uars(dev, dev_cap);
974 
975 	/* Although the UAR page size in FW differs from the system page size,
976 	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
977 	 * still work with the assumption that uar page size == system page size
978 	 */
979 	dev->caps.uar_page_size = PAGE_SIZE;
980 
981 	err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap);
982 	if (err) {
983 		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
984 			 err);
985 		goto free_mem;
986 	}
987 
988 	if ((func_cap->pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
989 	    PF_CONTEXT_BEHAVIOUR_MASK) {
990 		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
991 			 func_cap->pf_context_behaviour,
992 			 PF_CONTEXT_BEHAVIOUR_MASK);
993 		err = -EINVAL;
994 		goto free_mem;
995 	}
996 
997 	dev->caps.num_ports		= func_cap->num_ports;
998 	dev->quotas.qp			= func_cap->qp_quota;
999 	dev->quotas.srq			= func_cap->srq_quota;
1000 	dev->quotas.cq			= func_cap->cq_quota;
1001 	dev->quotas.mpt			= func_cap->mpt_quota;
1002 	dev->quotas.mtt			= func_cap->mtt_quota;
1003 	dev->caps.num_qps		= 1 << hca_param->log_num_qps;
1004 	dev->caps.num_srqs		= 1 << hca_param->log_num_srqs;
1005 	dev->caps.num_cqs		= 1 << hca_param->log_num_cqs;
1006 	dev->caps.num_mpts		= 1 << hca_param->log_mpt_sz;
1007 	dev->caps.num_eqs		= func_cap->max_eq;
1008 	dev->caps.reserved_eqs		= func_cap->reserved_eq;
1009 	dev->caps.reserved_lkey		= func_cap->reserved_lkey;
1010 	dev->caps.num_pds               = MLX4_NUM_PDS;
1011 	dev->caps.num_mgms              = 0;
1012 	dev->caps.num_amgms             = 0;
1013 
1014 	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
1015 		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
1016 			 dev->caps.num_ports, MLX4_MAX_PORTS);
1017 		err = -ENODEV;
1018 		goto free_mem;
1019 	}
1020 
1021 	mlx4_replace_zero_macs(dev);
1022 
1023 	err = mlx4_slave_special_qp_cap(dev);
1024 	if (err) {
1025 		mlx4_err(dev, "Setting special QP caps failed, aborting\n");
1026 		goto free_mem;
1027 	}
1028 
1029 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
1030 				       dev->caps.reserved_uars) >
1031 				       pci_resource_len(dev->persist->pdev,
1032 							2)) {
1033 		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
1034 			 dev->caps.uar_page_size * dev->caps.num_uars,
1035 			 (unsigned long long)
1036 			 pci_resource_len(dev->persist->pdev, 2));
1037 		err = -ENOMEM;
1038 		goto err_mem;
1039 	}
1040 
1041 	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
1042 		dev->caps.eqe_size   = 64;
1043 		dev->caps.eqe_factor = 1;
1044 	} else {
1045 		dev->caps.eqe_size   = 32;
1046 		dev->caps.eqe_factor = 0;
1047 	}
1048 
1049 	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
1050 		dev->caps.cqe_size   = 64;
1051 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1052 	} else {
1053 		dev->caps.cqe_size   = 32;
1054 	}
1055 
1056 	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
1057 		dev->caps.eqe_size = hca_param->eqe_size;
1058 		dev->caps.eqe_factor = 0;
1059 	}
1060 
1061 	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
1062 		dev->caps.cqe_size = hca_param->cqe_size;
1063 		/* User still needs to know when CQE > 32B */
1064 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1065 	}
1066 
1067 	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1068 	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
1069 
1070 	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
1071 	mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n");
1072 
1073 	slave_adjust_steering_mode(dev, dev_cap, hca_param);
1074 	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
1075 		 hca_param->rss_ip_frags ? "on" : "off");
1076 
1077 	if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
1078 	    dev->caps.bf_reg_size)
1079 		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
1080 
1081 	if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
1082 		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
1083 
1084 err_mem:
1085 	if (err)
1086 		mlx4_slave_destroy_special_qp_cap(dev);
1087 free_mem:
1088 	kfree(hca_param);
1089 	kfree(func_cap);
1090 	kfree(dev_cap);
1091 	return err;
1092 }
1093 
1094 static void mlx4_request_modules(struct mlx4_dev *dev)
1095 {
1096 	int port;
1097 	int has_ib_port = false;
1098 	int has_eth_port = false;
1099 #define EN_DRV_NAME	"mlx4_en"
1100 #define IB_DRV_NAME	"mlx4_ib"
1101 
1102 	for (port = 1; port <= dev->caps.num_ports; port++) {
1103 		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
1104 			has_ib_port = true;
1105 		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
1106 			has_eth_port = true;
1107 	}
1108 
1109 	if (has_eth_port)
1110 		request_module_nowait(EN_DRV_NAME);
1111 	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
1112 		request_module_nowait(IB_DRV_NAME);
1113 }
1114 
1115 /*
1116  * Change the port configuration of the device.
1117  * Every user of this function must hold the port mutex.
1118  */
1119 int mlx4_change_port_types(struct mlx4_dev *dev,
1120 			   enum mlx4_port_type *port_types)
1121 {
1122 	int err = 0;
1123 	int change = 0;
1124 	int port;
1125 
1126 	for (port = 0; port <  dev->caps.num_ports; port++) {
1127 		/* Change the port type only if the new type is different
1128 		 * from the current, and not set to Auto */
1129 		if (port_types[port] != dev->caps.port_type[port + 1])
1130 			change = 1;
1131 	}
1132 	if (change) {
1133 		mlx4_unregister_device(dev);
1134 		for (port = 1; port <= dev->caps.num_ports; port++) {
1135 			mlx4_CLOSE_PORT(dev, port);
1136 			dev->caps.port_type[port] = port_types[port - 1];
1137 			err = mlx4_SET_PORT(dev, port, -1);
1138 			if (err) {
1139 				mlx4_err(dev, "Failed to set port %d, aborting\n",
1140 					 port);
1141 				goto out;
1142 			}
1143 		}
1144 		mlx4_set_port_mask(dev);
1145 		err = mlx4_register_device(dev);
1146 		if (err) {
1147 			mlx4_err(dev, "Failed to register device\n");
1148 			goto out;
1149 		}
1150 		mlx4_request_modules(dev);
1151 	}
1152 
1153 out:
1154 	return err;
1155 }
1156 
1157 static ssize_t show_port_type(struct device *dev,
1158 			      struct device_attribute *attr,
1159 			      char *buf)
1160 {
1161 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1162 						   port_attr);
1163 	struct mlx4_dev *mdev = info->dev;
1164 	char type[8];
1165 
1166 	sprintf(type, "%s",
1167 		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
1168 		"ib" : "eth");
1169 	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
1170 		sprintf(buf, "auto (%s)\n", type);
1171 	else
1172 		sprintf(buf, "%s\n", type);
1173 
1174 	return strlen(buf);
1175 }
1176 
1177 static int __set_port_type(struct mlx4_port_info *info,
1178 			   enum mlx4_port_type port_type)
1179 {
1180 	struct mlx4_dev *mdev = info->dev;
1181 	struct mlx4_priv *priv = mlx4_priv(mdev);
1182 	enum mlx4_port_type types[MLX4_MAX_PORTS];
1183 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
1184 	int i;
1185 	int err = 0;
1186 
1187 	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
1188 		mlx4_err(mdev,
1189 			 "Requested port type for port %d is not supported on this HCA\n",
1190 			 info->port);
1191 		return -EOPNOTSUPP;
1192 	}
1193 
1194 	mlx4_stop_sense(mdev);
1195 	mutex_lock(&priv->port_mutex);
1196 	info->tmp_type = port_type;
1197 
1198 	/* Possible type is always the one that was delivered */
1199 	mdev->caps.possible_type[info->port] = info->tmp_type;
1200 
1201 	for (i = 0; i < mdev->caps.num_ports; i++) {
1202 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
1203 					mdev->caps.possible_type[i+1];
1204 		if (types[i] == MLX4_PORT_TYPE_AUTO)
1205 			types[i] = mdev->caps.port_type[i+1];
1206 	}
1207 
1208 	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
1209 	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
1210 		for (i = 1; i <= mdev->caps.num_ports; i++) {
1211 			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
1212 				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
1213 				err = -EOPNOTSUPP;
1214 			}
1215 		}
1216 	}
1217 	if (err) {
1218 		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
1219 		goto out;
1220 	}
1221 
1222 	mlx4_do_sense_ports(mdev, new_types, types);
1223 
1224 	err = mlx4_check_port_params(mdev, new_types);
1225 	if (err)
1226 		goto out;
1227 
1228 	/* We are about to apply the changes after the configuration
1229 	 * was verified, no need to remember the temporary types
1230 	 * any more */
1231 	for (i = 0; i < mdev->caps.num_ports; i++)
1232 		priv->port[i + 1].tmp_type = 0;
1233 
1234 	err = mlx4_change_port_types(mdev, new_types);
1235 
1236 out:
1237 	mlx4_start_sense(mdev);
1238 	mutex_unlock(&priv->port_mutex);
1239 
1240 	return err;
1241 }
1242 
1243 static ssize_t set_port_type(struct device *dev,
1244 			     struct device_attribute *attr,
1245 			     const char *buf, size_t count)
1246 {
1247 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1248 						   port_attr);
1249 	struct mlx4_dev *mdev = info->dev;
1250 	enum mlx4_port_type port_type;
1251 	static DEFINE_MUTEX(set_port_type_mutex);
1252 	int err;
1253 
1254 	mutex_lock(&set_port_type_mutex);
1255 
1256 	if (!strcmp(buf, "ib\n")) {
1257 		port_type = MLX4_PORT_TYPE_IB;
1258 	} else if (!strcmp(buf, "eth\n")) {
1259 		port_type = MLX4_PORT_TYPE_ETH;
1260 	} else if (!strcmp(buf, "auto\n")) {
1261 		port_type = MLX4_PORT_TYPE_AUTO;
1262 	} else {
1263 		mlx4_err(mdev, "%s is not a supported port type\n", buf);
1264 		err = -EINVAL;
1265 		goto err_out;
1266 	}
1267 
1268 	err = __set_port_type(info, port_type);
1269 
1270 err_out:
1271 	mutex_unlock(&set_port_type_mutex);
1272 
1273 	return err ? err : count;
1274 }
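/* The attribute above is exposed in sysfs under the PCI device directory
 * (path shown is illustrative of the mlx4_port%d naming scheme):
 *
 *   echo eth > /sys/bus/pci/devices/0000:03:00.0/mlx4_port1
 */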
1275 
1276 enum ibta_mtu {
1277 	IB_MTU_256  = 1,
1278 	IB_MTU_512  = 2,
1279 	IB_MTU_1024 = 3,
1280 	IB_MTU_2048 = 4,
1281 	IB_MTU_4096 = 5
1282 };
1283 
1284 static inline int int_to_ibta_mtu(int mtu)
1285 {
1286 	switch (mtu) {
1287 	case 256:  return IB_MTU_256;
1288 	case 512:  return IB_MTU_512;
1289 	case 1024: return IB_MTU_1024;
1290 	case 2048: return IB_MTU_2048;
1291 	case 4096: return IB_MTU_4096;
1292 	default: return -1;
1293 	}
1294 }
1295 
1296 static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
1297 {
1298 	switch (mtu) {
1299 	case IB_MTU_256:  return  256;
1300 	case IB_MTU_512:  return  512;
1301 	case IB_MTU_1024: return 1024;
1302 	case IB_MTU_2048: return 2048;
1303 	case IB_MTU_4096: return 4096;
1304 	default: return -1;
1305 	}
1306 }
1307 
1308 static ssize_t show_port_ib_mtu(struct device *dev,
1309 			     struct device_attribute *attr,
1310 			     char *buf)
1311 {
1312 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1313 						   port_mtu_attr);
1314 	struct mlx4_dev *mdev = info->dev;
1315 
1316 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
1317 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1318 
1319 	sprintf(buf, "%d\n",
1320 			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
1321 	return strlen(buf);
1322 }
1323 
1324 static ssize_t set_port_ib_mtu(struct device *dev,
1325 			     struct device_attribute *attr,
1326 			     const char *buf, size_t count)
1327 {
1328 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1329 						   port_mtu_attr);
1330 	struct mlx4_dev *mdev = info->dev;
1331 	struct mlx4_priv *priv = mlx4_priv(mdev);
1332 	int err, port, mtu, ibta_mtu = -1;
1333 
1334 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
1335 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1336 		return -EINVAL;
1337 	}
1338 
1339 	err = kstrtoint(buf, 0, &mtu);
1340 	if (!err)
1341 		ibta_mtu = int_to_ibta_mtu(mtu);
1342 
1343 	if (err || ibta_mtu < 0) {
1344 		mlx4_err(mdev, "%s is not a valid IBTA MTU\n", buf);
1345 		return -EINVAL;
1346 	}
1347 
1348 	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
1349 
1350 	mlx4_stop_sense(mdev);
1351 	mutex_lock(&priv->port_mutex);
1352 	mlx4_unregister_device(mdev);
1353 	for (port = 1; port <= mdev->caps.num_ports; port++) {
1354 		mlx4_CLOSE_PORT(mdev, port);
1355 		err = mlx4_SET_PORT(mdev, port, -1);
1356 		if (err) {
1357 			mlx4_err(mdev, "Failed to set port %d, aborting\n",
1358 				 port);
1359 			goto err_set_port;
1360 		}
1361 	}
1362 	err = mlx4_register_device(mdev);
1363 err_set_port:
1364 	mutex_unlock(&priv->port_mutex);
1365 	mlx4_start_sense(mdev);
1366 	return err ? err : count;
1367 }
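/* Similarly for the IB MTU attribute (illustrative path); only the
 * IBTA-defined values 256/512/1024/2048/4096 are accepted:
 *
 *   echo 2048 > /sys/bus/pci/devices/0000:03:00.0/mlx4_port1_mtu
 */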
1368 
1369 /* bond for multi-function device */
1370 #define MAX_MF_BOND_ALLOWED_SLAVES 63
1371 static int mlx4_mf_bond(struct mlx4_dev *dev)
1372 {
1373 	int err = 0;
1374 	int nvfs;
1375 	struct mlx4_slaves_pport slaves_port1;
1376 	struct mlx4_slaves_pport slaves_port2;
1377 
1378 	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
1379 	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
1380 
1381 	/* only single port vfs are allowed */
1382 	if (bitmap_weight_and(slaves_port1.slaves, slaves_port2.slaves,
1383 			      dev->persist->num_vfs + 1) > 1) {
1384 		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
1385 		return -EINVAL;
1386 	}
1387 
1388 	/* The number of virtual functions is the total number of functions
1389 	 * minus one physical function per port.
1390 	 */
1391 	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
1392 		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
1393 
1394 	/* limit on maximum allowed VFs */
1395 	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
1396 		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
1397 			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
1398 		return -EINVAL;
1399 	}
1400 
1401 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1402 		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
1403 		return -EINVAL;
1404 	}
1405 
1406 	err = mlx4_bond_mac_table(dev);
1407 	if (err)
1408 		return err;
1409 	err = mlx4_bond_vlan_table(dev);
1410 	if (err)
1411 		goto err1;
1412 	err = mlx4_bond_fs_rules(dev);
1413 	if (err)
1414 		goto err2;
1415 
1416 	return 0;
1417 err2:
1418 	(void)mlx4_unbond_vlan_table(dev);
1419 err1:
1420 	(void)mlx4_unbond_mac_table(dev);
1421 	return err;
1422 }
1423 
1424 static int mlx4_mf_unbond(struct mlx4_dev *dev)
1425 {
1426 	int ret, ret1;
1427 
1428 	ret = mlx4_unbond_fs_rules(dev);
1429 	if (ret)
1430 		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
1431 	ret1 = mlx4_unbond_mac_table(dev);
1432 	if (ret1) {
1433 		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
1434 		ret = ret1;
1435 	}
1436 	ret1 = mlx4_unbond_vlan_table(dev);
1437 	if (ret1) {
1438 		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
1439 		ret = ret1;
1440 	}
1441 	return ret;
1442 }
1443 
1444 int mlx4_bond(struct mlx4_dev *dev)
1445 {
1446 	int ret = 0;
1447 	struct mlx4_priv *priv = mlx4_priv(dev);
1448 
1449 	mutex_lock(&priv->bond_mutex);
1450 
1451 	if (!mlx4_is_bonded(dev)) {
1452 		ret = mlx4_do_bond(dev, true);
1453 		if (ret)
1454 			mlx4_err(dev, "Failed to bond device: %d\n", ret);
1455 		if (!ret && mlx4_is_master(dev)) {
1456 			ret = mlx4_mf_bond(dev);
1457 			if (ret) {
1458 				mlx4_err(dev, "bond for multifunction failed\n");
1459 				mlx4_do_bond(dev, false);
1460 			}
1461 		}
1462 	}
1463 
1464 	mutex_unlock(&priv->bond_mutex);
1465 	if (!ret)
1466 		mlx4_dbg(dev, "Device is bonded\n");
1467 
1468 	return ret;
1469 }
1470 EXPORT_SYMBOL_GPL(mlx4_bond);
1471 
1472 int mlx4_unbond(struct mlx4_dev *dev)
1473 {
1474 	int ret = 0;
1475 	struct mlx4_priv *priv = mlx4_priv(dev);
1476 
1477 	mutex_lock(&priv->bond_mutex);
1478 
1479 	if (mlx4_is_bonded(dev)) {
1480 		int ret2 = 0;
1481 
1482 		ret = mlx4_do_bond(dev, false);
1483 		if (ret)
1484 			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1485 		if (mlx4_is_master(dev))
1486 			ret2 = mlx4_mf_unbond(dev);
1487 		if (ret2) {
1488 			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
1489 			ret = ret2;
1490 		}
1491 	}
1492 
1493 	mutex_unlock(&priv->bond_mutex);
1494 	if (!ret)
1495 		mlx4_dbg(dev, "Device is unbonded\n");
1496 
1497 	return ret;
1498 }
1499 EXPORT_SYMBOL_GPL(mlx4_unbond);
1500 
1501 
1502 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1503 {
1504 	u8 port1 = v2p->port1;
1505 	u8 port2 = v2p->port2;
1506 	struct mlx4_priv *priv = mlx4_priv(dev);
1507 	int err;
1508 
1509 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1510 		return -EOPNOTSUPP;
1511 
1512 	mutex_lock(&priv->bond_mutex);
1513 
1514 	/* zero means keep current mapping for this port */
1515 	if (port1 == 0)
1516 		port1 = priv->v2p.port1;
1517 	if (port2 == 0)
1518 		port2 = priv->v2p.port2;
1519 
1520 	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1521 	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1522 	    (port1 == 2 && port2 == 1)) {
1523 		/* besides the boundary checks, cross-mapping makes
1524 		 * no sense and is therefore not allowed */
1525 		err = -EINVAL;
1526 	} else if ((port1 == priv->v2p.port1) &&
1527 		 (port2 == priv->v2p.port2)) {
1528 		err = 0;
1529 	} else {
1530 		err = mlx4_virt2phy_port_map(dev, port1, port2);
1531 		if (!err) {
1532 			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1533 				 port1, port2);
1534 			priv->v2p.port1 = port1;
1535 			priv->v2p.port2 = port2;
1536 		} else {
1537 			mlx4_err(dev, "Failed to change port map: %d\n", err);
1538 		}
1539 	}
1540 
1541 	mutex_unlock(&priv->bond_mutex);
1542 	return err;
1543 }
1544 EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1545 
1546 static int mlx4_load_fw(struct mlx4_dev *dev)
1547 {
1548 	struct mlx4_priv *priv = mlx4_priv(dev);
1549 	int err;
1550 
1551 	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1552 					 GFP_HIGHUSER | __GFP_NOWARN, 0);
1553 	if (!priv->fw.fw_icm) {
1554 		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1555 		return -ENOMEM;
1556 	}
1557 
1558 	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1559 	if (err) {
1560 		mlx4_err(dev, "MAP_FA command failed, aborting\n");
1561 		goto err_free;
1562 	}
1563 
1564 	err = mlx4_RUN_FW(dev);
1565 	if (err) {
1566 		mlx4_err(dev, "RUN_FW command failed, aborting\n");
1567 		goto err_unmap_fa;
1568 	}
1569 
1570 	return 0;
1571 
1572 err_unmap_fa:
1573 	mlx4_UNMAP_FA(dev);
1574 
1575 err_free:
1576 	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1577 	return err;
1578 }
1579 
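/* The cMPT area in ICM holds four regions, one per MLX4_CMPT_TYPE_*
 * (QP, SRQ, CQ, EQ); region N starts at
 * cmpt_base + ((u64)(N * cmpt_entry_sz) << MLX4_CMPT_SHIFT),
 * matching the offsets computed below.
 */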
1580 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1581 				int cmpt_entry_sz)
1582 {
1583 	struct mlx4_priv *priv = mlx4_priv(dev);
1584 	int err;
1585 	int num_eqs;
1586 
1587 	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1588 				  cmpt_base +
1589 				  ((u64) (MLX4_CMPT_TYPE_QP *
1590 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1591 				  cmpt_entry_sz, dev->caps.num_qps,
1592 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1593 				  0, 0);
1594 	if (err)
1595 		goto err;
1596 
1597 	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1598 				  cmpt_base +
1599 				  ((u64) (MLX4_CMPT_TYPE_SRQ *
1600 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1601 				  cmpt_entry_sz, dev->caps.num_srqs,
1602 				  dev->caps.reserved_srqs, 0, 0);
1603 	if (err)
1604 		goto err_qp;
1605 
1606 	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1607 				  cmpt_base +
1608 				  ((u64) (MLX4_CMPT_TYPE_CQ *
1609 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1610 				  cmpt_entry_sz, dev->caps.num_cqs,
1611 				  dev->caps.reserved_cqs, 0, 0);
1612 	if (err)
1613 		goto err_srq;
1614 
1615 	num_eqs = dev->phys_caps.num_phys_eqs;
1616 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1617 				  cmpt_base +
1618 				  ((u64) (MLX4_CMPT_TYPE_EQ *
1619 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1620 				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
1621 	if (err)
1622 		goto err_cq;
1623 
1624 	return 0;
1625 
1626 err_cq:
1627 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1628 
1629 err_srq:
1630 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1631 
1632 err_qp:
1633 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1634 
1635 err:
1636 	return err;
1637 }
1638 
1639 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1640 			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
1641 {
1642 	struct mlx4_priv *priv = mlx4_priv(dev);
1643 	u64 aux_pages;
1644 	int num_eqs;
1645 	int err;
1646 
1647 	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1648 	if (err) {
1649 		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1650 		return err;
1651 	}
1652 
1653 	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1654 		 (unsigned long long) icm_size >> 10,
1655 		 (unsigned long long) aux_pages << 2);
1656 
1657 	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1658 					  GFP_HIGHUSER | __GFP_NOWARN, 0);
1659 	if (!priv->fw.aux_icm) {
1660 		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1661 		return -ENOMEM;
1662 	}
1663 
1664 	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1665 	if (err) {
1666 		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1667 		goto err_free_aux;
1668 	}
1669 
1670 	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1671 	if (err) {
1672 		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1673 		goto err_unmap_aux;
1674 	}
1675 
1677 	num_eqs = dev->phys_caps.num_phys_eqs;
1678 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1679 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
1680 				  num_eqs, num_eqs, 0, 0);
1681 	if (err) {
1682 		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1683 		goto err_unmap_cmpt;
1684 	}
1685 
1686 	/*
1687 	 * Reserved MTT entries must be aligned up to a cacheline
1688 	 * boundary, since the FW will write to them, while the driver
1689 	 * writes to all other MTT entries. (The variable
1690 	 * dev->caps.mtt_entry_sz below is really the MTT segment
1691 	 * size, not the raw entry size)
1692 	 */
1693 	dev->caps.reserved_mtts =
1694 		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1695 		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1696 
1697 	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1698 				  init_hca->mtt_base,
1699 				  dev->caps.mtt_entry_sz,
1700 				  dev->caps.num_mtts,
1701 				  dev->caps.reserved_mtts, 1, 0);
1702 	if (err) {
1703 		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1704 		goto err_unmap_eq;
1705 	}
1706 
1707 	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1708 				  init_hca->dmpt_base,
1709 				  dev_cap->dmpt_entry_sz,
1710 				  dev->caps.num_mpts,
1711 				  dev->caps.reserved_mrws, 1, 1);
1712 	if (err) {
1713 		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1714 		goto err_unmap_mtt;
1715 	}
1716 
1717 	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1718 				  init_hca->qpc_base,
1719 				  dev_cap->qpc_entry_sz,
1720 				  dev->caps.num_qps,
1721 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1722 				  0, 0);
1723 	if (err) {
1724 		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1725 		goto err_unmap_dmpt;
1726 	}
1727 
1728 	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1729 				  init_hca->auxc_base,
1730 				  dev_cap->aux_entry_sz,
1731 				  dev->caps.num_qps,
1732 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1733 				  0, 0);
1734 	if (err) {
1735 		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1736 		goto err_unmap_qp;
1737 	}
1738 
1739 	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1740 				  init_hca->altc_base,
1741 				  dev_cap->altc_entry_sz,
1742 				  dev->caps.num_qps,
1743 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1744 				  0, 0);
1745 	if (err) {
1746 		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1747 		goto err_unmap_auxc;
1748 	}
1749 
1750 	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1751 				  init_hca->rdmarc_base,
1752 				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1753 				  dev->caps.num_qps,
1754 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1755 				  0, 0);
1756 	if (err) {
1757 		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1758 		goto err_unmap_altc;
1759 	}
1760 
1761 	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1762 				  init_hca->cqc_base,
1763 				  dev_cap->cqc_entry_sz,
1764 				  dev->caps.num_cqs,
1765 				  dev->caps.reserved_cqs, 0, 0);
1766 	if (err) {
1767 		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1768 		goto err_unmap_rdmarc;
1769 	}
1770 
1771 	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1772 				  init_hca->srqc_base,
1773 				  dev_cap->srq_entry_sz,
1774 				  dev->caps.num_srqs,
1775 				  dev->caps.reserved_srqs, 0, 0);
1776 	if (err) {
1777 		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1778 		goto err_unmap_cq;
1779 	}
1780 
1781 	/*
1782 	 * For flow steering device managed mode it is required to use
1783 	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1784 	 * required, but for simplicity just map the whole multicast
1785 	 * group table now.  The table isn't very big and it's a lot
1786 	 * easier than trying to track ref counts.
1787 	 */
1788 	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1789 				  init_hca->mc_base,
1790 				  mlx4_get_mgm_entry_size(dev),
1791 				  dev->caps.num_mgms + dev->caps.num_amgms,
1792 				  dev->caps.num_mgms + dev->caps.num_amgms,
1793 				  0, 0);
1794 	if (err) {
1795 		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1796 		goto err_unmap_srq;
1797 	}
1798 
1799 	return 0;
1800 
1801 err_unmap_srq:
1802 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1803 
1804 err_unmap_cq:
1805 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1806 
1807 err_unmap_rdmarc:
1808 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1809 
1810 err_unmap_altc:
1811 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1812 
1813 err_unmap_auxc:
1814 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1815 
1816 err_unmap_qp:
1817 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1818 
1819 err_unmap_dmpt:
1820 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1821 
1822 err_unmap_mtt:
1823 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1824 
1825 err_unmap_eq:
1826 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1827 
1828 err_unmap_cmpt:
1829 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1830 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1831 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1832 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1833 
1834 err_unmap_aux:
1835 	mlx4_UNMAP_ICM_AUX(dev);
1836 
1837 err_free_aux:
1838 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1839 
1840 	return err;
1841 }
1842 
1843 static void mlx4_free_icms(struct mlx4_dev *dev)
1844 {
1845 	struct mlx4_priv *priv = mlx4_priv(dev);
1846 
1847 	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1848 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1849 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1850 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1851 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1852 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1853 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1854 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1855 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1856 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1857 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1858 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1859 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1860 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1861 
1862 	mlx4_UNMAP_ICM_AUX(dev);
1863 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1864 }
1865 
1866 static void mlx4_slave_exit(struct mlx4_dev *dev)
1867 {
1868 	struct mlx4_priv *priv = mlx4_priv(dev);
1869 
1870 	mutex_lock(&priv->cmd.slave_cmd_mutex);
1871 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1872 			  MLX4_COMM_TIME))
1873 		mlx4_warn(dev, "Failed to close slave function\n");
1874 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
1875 }
1876 
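/*
 * Map the BlueFlame send-doorbell area as write-combining. The BF
 * registers live in BAR 2 immediately after the UAR pages, so the
 * mapping covers the remainder of the BAR past the first num_uars
 * pages.
 */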
1877 static int map_bf_area(struct mlx4_dev *dev)
1878 {
1879 	struct mlx4_priv *priv = mlx4_priv(dev);
1880 	resource_size_t bf_start;
1881 	resource_size_t bf_len;
1882 	int err = 0;
1883 
1884 	if (!dev->caps.bf_reg_size)
1885 		return -ENXIO;
1886 
1887 	bf_start = pci_resource_start(dev->persist->pdev, 2) +
1888 			(dev->caps.num_uars << PAGE_SHIFT);
1889 	bf_len = pci_resource_len(dev->persist->pdev, 2) -
1890 			(dev->caps.num_uars << PAGE_SHIFT);
1891 	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1892 	if (!priv->bf_mapping)
1893 		err = -ENOMEM;
1894 
1895 	return err;
1896 }
1897 
1898 static void unmap_bf_area(struct mlx4_dev *dev)
1899 {
1900 	if (mlx4_priv(dev)->bf_mapping)
1901 		io_mapping_free(mlx4_priv(dev)->bf_mapping);
1902 }
1903 
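/*
 * Read the free-running 64-bit HCA clock. The counter is exposed as two
 * big-endian 32-bit words, so read high, low, then high again and retry
 * (up to 10 times) until both reads of the high word match; this
 * guarantees the low word did not wrap between the two reads.
 */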
1904 u64 mlx4_read_clock(struct mlx4_dev *dev)
1905 {
1906 	u32 clockhi, clocklo, clockhi1;
1907 	u64 cycles;
1908 	int i;
1909 	struct mlx4_priv *priv = mlx4_priv(dev);
1910 
1911 	for (i = 0; i < 10; i++) {
1912 		clockhi = swab32(readl(priv->clock_mapping));
1913 		clocklo = swab32(readl(priv->clock_mapping + 4));
1914 		clockhi1 = swab32(readl(priv->clock_mapping));
1915 		if (clockhi == clockhi1)
1916 			break;
1917 	}
1918 
1919 	cycles = (u64) clockhi << 32 | (u64) clocklo;
1920 
1921 	return cycles;
1922 }
1923 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1924 
1926 static int map_internal_clock(struct mlx4_dev *dev)
1927 {
1928 	struct mlx4_priv *priv = mlx4_priv(dev);
1929 
1930 	priv->clock_mapping =
1931 		ioremap(pci_resource_start(dev->persist->pdev,
1932 					   priv->fw.clock_bar) +
1933 			priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1934 
1935 	if (!priv->clock_mapping)
1936 		return -ENOMEM;
1937 
1938 	return 0;
1939 }
1940 
1941 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1942 				   struct mlx4_clock_params *params)
1943 {
1944 	struct mlx4_priv *priv = mlx4_priv(dev);
1945 
1946 	if (mlx4_is_slave(dev))
1947 		return -EOPNOTSUPP;
1948 
1949 	if (!dev->caps.map_clock_to_user) {
1950 		mlx4_dbg(dev, "Map clock to user is not supported.\n");
1951 		return -EOPNOTSUPP;
1952 	}
1953 
1954 	if (!params)
1955 		return -EINVAL;
1956 
1957 	params->bar = priv->fw.clock_bar;
1958 	params->offset = priv->fw.clock_offset;
1959 	params->size = MLX4_CLOCK_SIZE;
1960 
1961 	return 0;
1962 }
1963 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1964 
1965 static void unmap_internal_clock(struct mlx4_dev *dev)
1966 {
1967 	struct mlx4_priv *priv = mlx4_priv(dev);
1968 
1969 	if (priv->clock_mapping)
1970 		iounmap(priv->clock_mapping);
1971 }
1972 
1973 static void mlx4_close_hca(struct mlx4_dev *dev)
1974 {
1975 	unmap_internal_clock(dev);
1976 	unmap_bf_area(dev);
1977 	if (mlx4_is_slave(dev))
1978 		mlx4_slave_exit(dev);
1979 	else {
1980 		mlx4_CLOSE_HCA(dev, 0);
1981 		mlx4_free_icms(dev);
1982 	}
1983 }
1984 
1985 static void mlx4_close_fw(struct mlx4_dev *dev)
1986 {
1987 	if (!mlx4_is_slave(dev)) {
1988 		mlx4_UNMAP_FA(dev);
1989 		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1990 	}
1991 }
1992 
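/*
 * Poll the communication channel's offline bit until the PF reports
 * that it is online, the timeout expires, or device removal has been
 * requested.
 */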
1993 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1994 {
1995 #define COMM_CHAN_OFFLINE_OFFSET 0x09
1996 
1997 	u32 comm_flags;
1998 	u32 offline_bit;
1999 	unsigned long end;
2000 	struct mlx4_priv *priv = mlx4_priv(dev);
2001 
2002 	end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
2003 	while (time_before(jiffies, end)) {
2004 		comm_flags = swab32(readl((char __iomem *)priv->mfunc.comm +
2005 					  MLX4_COMM_CHAN_FLAGS));
2006 		offline_bit = (comm_flags &
2007 			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
2008 		if (!offline_bit)
2009 			return 0;
2010 
2011 		/* If device removal has been requested,
2012 		 * do not continue retrying.
2013 		 */
2014 		if (dev->persist->interface_state &
2015 		    MLX4_INTERFACE_STATE_NOWAIT)
2016 			break;
2017 
2018 		/* There are cases in the AER/reset flow where the PF
2019 		 * needs around 100 msec to load. Sleep for 100 msec
2020 		 * to allow other tasks to make use of this CPU during
2021 		 * that time interval.
2022 		 */
2023 		msleep(100);
2024 	}
2025 	mlx4_err(dev, "Communication channel is offline.\n");
2026 	return -EIO;
2027 }
2028 
2029 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
2030 {
2031 #define COMM_CHAN_RST_OFFSET 0x1e
2032 
2033 	struct mlx4_priv *priv = mlx4_priv(dev);
2034 	u32 comm_rst;
2035 	u32 comm_caps;
2036 
2037 	comm_caps = swab32(readl((char __iomem *)priv->mfunc.comm +
2038 				 MLX4_COMM_CHAN_CAPS));
2039 	comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
2040 
2041 	if (comm_rst)
2042 		dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
2043 }
2044 
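/*
 * Bring up the slave (VF) side of the command channel: defer probing
 * while the PF is still loading, reset the channel, verify that the
 * slave's command-interface revision matches the master's, then pass
 * the 64-bit VHCR DMA address to the master 16 bits at a time (VHCR0
 * carries bits 63:48, VHCR1 bits 47:32, VHCR2 bits 31:16) before the
 * final VHCR_EN command delivers the low bits and enables the VHCR.
 */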
2045 static int mlx4_init_slave(struct mlx4_dev *dev)
2046 {
2047 	struct mlx4_priv *priv = mlx4_priv(dev);
2048 	u64 dma = (u64) priv->mfunc.vhcr_dma;
2049 	int ret_from_reset = 0;
2050 	u32 slave_read;
2051 	u32 cmd_channel_ver;
2052 
2053 	if (atomic_read(&pf_loading)) {
2054 		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
2055 		return -EPROBE_DEFER;
2056 	}
2057 
2058 	mutex_lock(&priv->cmd.slave_cmd_mutex);
2059 	priv->cmd.max_cmds = 1;
2060 	if (mlx4_comm_check_offline(dev)) {
2061 		mlx4_err(dev, "PF is not responsive, skipping initialization\n");
2062 		goto err_offline;
2063 	}
2064 
2065 	mlx4_reset_vf_support(dev);
2066 	mlx4_warn(dev, "Sending reset\n");
2067 	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
2068 				       MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
2069 	/* If we are in the middle of an FLR, the slave will retry
2070 	 * NUM_OF_RESET_RETRIES times before giving up. */
2071 	if (ret_from_reset) {
2072 		if (ret_from_reset == MLX4_DELAY_RESET_SLAVE) {
2073 			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
2074 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
2075 			return -EPROBE_DEFER;
2076 		} else
2077 			goto err;
2078 	}
2079 
2080 	/* check the driver version - the slave I/F revision
2081 	 * must match the master's */
2082 	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
2083 	cmd_channel_ver = mlx4_comm_get_version();
2084 
2085 	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
2086 		MLX4_COMM_GET_IF_REV(slave_read)) {
2087 		mlx4_err(dev, "slave driver version is not supported by the master\n");
2088 		goto err;
2089 	}
2090 
2091 	mlx4_warn(dev, "Sending vhcr0\n");
2092 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
2093 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2094 		goto err;
2095 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
2096 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2097 		goto err;
2098 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
2099 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2100 		goto err;
2101 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
2102 			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2103 		goto err;
2104 
2105 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
2106 	return 0;
2107 
2108 err:
2109 	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
2110 err_offline:
2111 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
2112 	return -EIO;
2113 }
2114 
2115 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
2116 {
2117 	int i;
2118 
2119 	for (i = 1; i <= dev->caps.num_ports; i++) {
2120 		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
2121 			dev->caps.gid_table_len[i] =
2122 				mlx4_get_slave_num_gids(dev, 0, i);
2123 		else
2124 			dev->caps.gid_table_len[i] = 1;
2125 		dev->caps.pkey_table_len[i] =
2126 			dev->phys_caps.pkey_phys_table_len[i] - 1;
2127 	}
2128 }
2129 
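/*
 * Pick the smallest log2 MGM entry size that can hold qp_per_entry
 * QPs. An entry of 2^i bytes is made up of 16-byte lines; two lines
 * hold the header and each remaining line holds four QPNs, giving
 * room for 4 * ((2^i / 16) - 2) QPs. Returns -1 if no supported
 * entry size is large enough.
 */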
2130 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
2131 {
2132 	int i;
2133 
2134 	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
2135 	      i++) {
2136 		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
2137 			break;
2138 	}
2139 
2140 	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
2141 }
2142 
2143 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
2144 {
2145 	switch (dmfs_high_steer_mode) {
2146 	case MLX4_STEERING_DMFS_A0_DEFAULT:
2147 		return "default performance";
2148 
2149 	case MLX4_STEERING_DMFS_A0_DYNAMIC:
2150 		return "dynamic hybrid mode";
2151 
2152 	case MLX4_STEERING_DMFS_A0_STATIC:
2153 		return "performance optimized for limited rule configuration (static)";
2154 
2155 	case MLX4_STEERING_DMFS_A0_DISABLE:
2156 		return "disabled performance optimized steering";
2157 
2158 	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
2159 		return "performance optimized steering not supported";
2160 
2161 	default:
2162 		return "Unrecognized mode";
2163 	}
2164 }
2165 
2166 #define MLX4_DMFS_A0_STEERING			(1UL << 2)
2167 
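/*
 * Select the multicast steering mode. A non-positive
 * log_num_mgm_entry_size module parameter is interpreted (negated) as
 * a bitmask of options, e.g. MLX4_DMFS_A0_STEERING requests static
 * DMFS A0 performance steering. Device-managed flow steering is chosen
 * when the FW supports it and an MGM entry can hold at least one QP
 * per function (num_vfs + 1); otherwise fall back to B0 steering when
 * both UC and MC VEP steering flags are set, or A0 steering if not.
 */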
2168 static void choose_steering_mode(struct mlx4_dev *dev,
2169 				 struct mlx4_dev_cap *dev_cap)
2170 {
2171 	if (mlx4_log_num_mgm_entry_size <= 0) {
2172 		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
2173 			if (dev->caps.dmfs_high_steer_mode ==
2174 			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2175 				mlx4_err(dev, "DMFS high rate mode not supported\n");
2176 			else
2177 				dev->caps.dmfs_high_steer_mode =
2178 					MLX4_STEERING_DMFS_A0_STATIC;
2179 		}
2180 	}
2181 
2182 	if (mlx4_log_num_mgm_entry_size <= 0 &&
2183 	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
2184 	    (!mlx4_is_mfunc(dev) ||
2185 	     (dev_cap->fs_max_num_qp_per_entry >=
2186 	     (dev->persist->num_vfs + 1))) &&
2187 	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
2188 		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
2189 		dev->oper_log_mgm_entry_size =
2190 			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
2191 		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
2192 		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
2193 		dev->caps.fs_log_max_ucast_qp_range_size =
2194 			dev_cap->fs_log_max_ucast_qp_range_size;
2195 	} else {
2196 		if (dev->caps.dmfs_high_steer_mode !=
2197 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2198 			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
2199 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
2200 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2201 			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
2202 		else {
2203 			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
2204 
2205 			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
2206 			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2207 				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
2208 		}
2209 		dev->oper_log_mgm_entry_size =
2210 			mlx4_log_num_mgm_entry_size > 0 ?
2211 			mlx4_log_num_mgm_entry_size :
2212 			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
2213 		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
2214 	}
2215 	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
2216 		 mlx4_steering_mode_str(dev->caps.steering_mode),
2217 		 dev->oper_log_mgm_entry_size,
2218 		 mlx4_log_num_mgm_entry_size);
2219 }
2220 
2221 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
2222 				       struct mlx4_dev_cap *dev_cap)
2223 {
2224 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2225 	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
2226 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
2227 	else
2228 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
2229 
2230 	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
2231 		 (dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
2232 }
2233 
2234 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2235 {
2236 	int i;
2237 	struct mlx4_port_cap port_cap;
2238 
2239 	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2240 		return -EINVAL;
2241 
2242 	for (i = 1; i <= dev->caps.num_ports; i++) {
2243 		if (mlx4_dev_port(dev, i, &port_cap)) {
2244 			mlx4_err(dev,
2245 				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
2246 		} else if ((dev->caps.dmfs_high_steer_mode !=
2247 			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
2248 			   (port_cap.dmfs_optimized_state ==
2249 			    !!(dev->caps.dmfs_high_steer_mode ==
2250 			    MLX4_STEERING_DMFS_A0_DISABLE))) {
2251 			mlx4_err(dev,
2252 				 "DMFS high rate steer mode differs: driver requested %s but it is %s in FW.\n",
2253 				 dmfs_high_rate_steering_mode_str(
2254 					dev->caps.dmfs_high_steer_mode),
2255 				 (port_cap.dmfs_optimized_state ?
2256 					"enabled" : "disabled"));
2257 		}
2258 	}
2259 
2260 	return 0;
2261 }
2262 
2263 static int mlx4_init_fw(struct mlx4_dev *dev)
2264 {
2265 	struct mlx4_mod_stat_cfg   mlx4_cfg;
2266 	int err = 0;
2267 
2268 	if (!mlx4_is_slave(dev)) {
2269 		err = mlx4_QUERY_FW(dev);
2270 		if (err) {
2271 			if (err == -EACCES)
2272 				mlx4_info(dev, "non-primary physical function, skipping\n");
2273 			else
2274 				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2275 			return err;
2276 		}
2277 
2278 		err = mlx4_load_fw(dev);
2279 		if (err) {
2280 			mlx4_err(dev, "Failed to start FW, aborting\n");
2281 			return err;
2282 		}
2283 
2284 		mlx4_cfg.log_pg_sz_m = 1;
2285 		mlx4_cfg.log_pg_sz = 0;
2286 		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2287 		if (err)
2288 			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2289 	}
2290 
2291 	return err;
2292 }
2293 
2294 static int mlx4_init_hca(struct mlx4_dev *dev)
2295 {
2296 	struct mlx4_priv	  *priv = mlx4_priv(dev);
2297 	struct mlx4_init_hca_param *init_hca = NULL;
2298 	struct mlx4_dev_cap	  *dev_cap = NULL;
2299 	struct mlx4_adapter	   adapter;
2300 	struct mlx4_profile	   profile;
2301 	u64 icm_size;
2302 	struct mlx4_config_dev_params params;
2303 	int err;
2304 
2305 	if (!mlx4_is_slave(dev)) {
2306 		dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
2307 		init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL);
2308 
2309 		if (!dev_cap || !init_hca) {
2310 			err = -ENOMEM;
2311 			goto out_free;
2312 		}
2313 
2314 		err = mlx4_dev_cap(dev, dev_cap);
2315 		if (err) {
2316 			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2317 			goto out_free;
2318 		}
2319 
2320 		choose_steering_mode(dev, dev_cap);
2321 		choose_tunnel_offload_mode(dev, dev_cap);
2322 
2323 		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2324 		    mlx4_is_master(dev))
2325 			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2326 
2327 		err = mlx4_get_phys_port_id(dev);
2328 		if (err)
2329 			mlx4_err(dev, "Failed to get physical port id\n");
2330 
2331 		if (mlx4_is_master(dev))
2332 			mlx4_parav_master_pf_caps(dev);
2333 
2334 		if (mlx4_low_memory_profile()) {
2335 			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2336 			profile = low_mem_profile;
2337 		} else {
2338 			profile = default_profile;
2339 		}
2340 		if (dev->caps.steering_mode ==
2341 		    MLX4_STEERING_MODE_DEVICE_MANAGED)
2342 			profile.num_mcg = MLX4_FS_NUM_MCG;
2343 
2344 		icm_size = mlx4_make_profile(dev, &profile, dev_cap,
2345 					     init_hca);
2346 		if ((long long) icm_size < 0) {
2347 			err = icm_size;
2348 			goto out_free;
2349 		}
2350 
2351 		if (enable_4k_uar || !dev->persist->num_vfs) {
2352 			init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
2353 						    PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
2354 			init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2355 		} else {
2356 			init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
2357 			init_hca->uar_page_sz = PAGE_SHIFT - 12;
2358 		}
2359 
2360 		init_hca->mw_enabled = 0;
2361 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2362 		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2363 			init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2364 
2365 		err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
2366 		if (err)
2367 			goto out_free;
2368 
2369 		err = mlx4_INIT_HCA(dev, init_hca);
2370 		if (err) {
2371 			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2372 			goto err_free_icm;
2373 		}
2374 
2375 		if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2376 			err = mlx4_query_func(dev, dev_cap);
2377 			if (err < 0) {
2378 				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2379 				goto err_close;
2380 			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2381 				dev->caps.num_eqs = dev_cap->max_eqs;
2382 				dev->caps.reserved_eqs = dev_cap->reserved_eqs;
2383 				dev->caps.reserved_uars = dev_cap->reserved_uars;
2384 			}
2385 		}
2386 
2387 		/*
2388 		 * If TS is supported by the FW, read the HCA core clock
2389 		 * frequency via the QUERY_HCA command.
2390 		 */
2391 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2392 			err = mlx4_QUERY_HCA(dev, init_hca);
2393 			if (err) {
2394 				mlx4_err(dev, "QUERY_HCA command failed, disabling timestamping\n");
2395 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2396 			} else {
2397 				dev->caps.hca_core_clock =
2398 					init_hca->hca_core_clock;
2399 			}
2400 
2401 			/* In case we got HCA frequency 0 - disable timestamping
2402 			 * to avoid dividing by zero
2403 			 */
2404 			if (!dev->caps.hca_core_clock) {
2405 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2406 				mlx4_err(dev,
2407 					 "HCA frequency is 0 - timestamping is not supported\n");
2408 			} else if (map_internal_clock(dev)) {
2409 				/*
2410 				 * Failed to map the internal clock;
2411 				 * disable timestamping.
2412 				 */
2413 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2414 				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2415 			}
2416 		}
2417 
2418 		if (dev->caps.dmfs_high_steer_mode !=
2419 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2420 			if (mlx4_validate_optimized_steering(dev))
2421 				mlx4_warn(dev, "Optimized steering validation failed\n");
2422 
2423 			if (dev->caps.dmfs_high_steer_mode ==
2424 			    MLX4_STEERING_DMFS_A0_DISABLE) {
2425 				dev->caps.dmfs_high_rate_qpn_base =
2426 					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2427 				dev->caps.dmfs_high_rate_qpn_range =
2428 					MLX4_A0_STEERING_TABLE_SIZE;
2429 			}
2430 
2431 			mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
2432 				  dmfs_high_rate_steering_mode_str(
2433 					dev->caps.dmfs_high_steer_mode));
2434 		}
2435 	} else {
2436 		err = mlx4_init_slave(dev);
2437 		if (err) {
2438 			if (err != -EPROBE_DEFER)
2439 				mlx4_err(dev, "Failed to initialize slave\n");
2440 			return err;
2441 		}
2442 
2443 		err = mlx4_slave_cap(dev);
2444 		if (err) {
2445 			mlx4_err(dev, "Failed to obtain slave caps\n");
2446 			goto err_close;
2447 		}
2448 	}
2449 
2450 	if (map_bf_area(dev))
2451 		mlx4_dbg(dev, "Failed to map blue flame area\n");
2452 
2453 	/* Only the master sets the ports; all other functions get them from it. */
2454 	if (!mlx4_is_slave(dev))
2455 		mlx4_set_port_mask(dev);
2456 
2457 	err = mlx4_QUERY_ADAPTER(dev, &adapter);
2458 	if (err) {
2459 		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2460 		goto unmap_bf;
2461 	}
2462 
2463 	/* Query CONFIG_DEV parameters */
2464 	err = mlx4_config_dev_retrieval(dev, &params);
2465 	if (err && err != -EOPNOTSUPP) {
2466 		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2467 	} else if (!err) {
2468 		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2469 		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2470 	}
2471 	priv->eq_table.inta_pin = adapter.inta_pin;
2472 	memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
2473 
2474 	err = 0;
2475 	goto out_free;
2476 
2477 unmap_bf:
2478 	unmap_internal_clock(dev);
2479 	unmap_bf_area(dev);
2480 
2481 	if (mlx4_is_slave(dev))
2482 		mlx4_slave_destroy_special_qp_cap(dev);
2483 
2484 err_close:
2485 	if (mlx4_is_slave(dev))
2486 		mlx4_slave_exit(dev);
2487 	else
2488 		mlx4_CLOSE_HCA(dev, 0);
2489 
2490 err_free_icm:
2491 	if (!mlx4_is_slave(dev))
2492 		mlx4_free_icms(dev);
2493 
2494 out_free:
2495 	kfree(dev_cap);
2496 	kfree(init_hca);
2497 
2498 	return err;
2499 }
2500 
2501 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2502 {
2503 	struct mlx4_priv *priv = mlx4_priv(dev);
2504 	int nent_pow2;
2505 
2506 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2507 		return -ENOENT;
2508 
2509 	if (!dev->caps.max_counters)
2510 		return -ENOSPC;
2511 
2512 	nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2513 	/* reserve last counter index for sink counter */
2514 	return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2515 				nent_pow2 - 1, 0,
2516 				nent_pow2 - dev->caps.max_counters + 1);
2517 }
2518 
2519 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2520 {
2521 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2522 		return;
2523 
2524 	if (!dev->caps.max_counters)
2525 		return;
2526 
2527 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2528 }
2529 
2530 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2531 {
2532 	struct mlx4_priv *priv = mlx4_priv(dev);
2533 	int port;
2534 
2535 	for (port = 0; port < dev->caps.num_ports; port++)
2536 		if (priv->def_counter[port] != -1)
2537 			mlx4_counter_free(dev, priv->def_counter[port]);
2538 }
2539 
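/*
 * Allocate one default counter per port. If the counter pool is
 * exhausted (-ENOSPC), the sink counter index is used instead; if
 * counters are unsupported (-ENOENT), the port is skipped and its
 * default counter stays at -1. A slave talking to an old PF driver
 * that returns -EINVAL also falls back to the sink counter.
 */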
2540 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2541 {
2542 	struct mlx4_priv *priv = mlx4_priv(dev);
2543 	int port, err = 0;
2544 	u32 idx;
2545 
2546 	for (port = 0; port < dev->caps.num_ports; port++)
2547 		priv->def_counter[port] = -1;
2548 
2549 	for (port = 0; port < dev->caps.num_ports; port++) {
2550 		err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER);
2551 
2552 		if (!err || err == -ENOSPC) {
2553 			priv->def_counter[port] = idx;
2554 			err = 0;
2555 		} else if (err == -ENOENT) {
2556 			err = 0;
2557 			continue;
2558 		} else if (mlx4_is_slave(dev) && err == -EINVAL) {
2559 			priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2560 			mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2561 				  MLX4_SINK_COUNTER_INDEX(dev));
2562 			err = 0;
2563 		} else {
2564 			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2565 				 __func__, port + 1, err);
2566 			mlx4_cleanup_default_counters(dev);
2567 			return err;
2568 		}
2569 
2570 		mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2571 			 __func__, priv->def_counter[port], port + 1);
2572 	}
2573 
2574 	return err;
2575 }
2576 
2577 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2578 {
2579 	struct mlx4_priv *priv = mlx4_priv(dev);
2580 
2581 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2582 		return -ENOENT;
2583 
2584 	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
2585 	if (*idx == -1) {
2586 		*idx = MLX4_SINK_COUNTER_INDEX(dev);
2587 		return -ENOSPC;
2588 	}
2589 
2590 	return 0;
2591 }
2592 
2593 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
2594 {
2595 	u32 in_modifier = RES_COUNTER | (((u32)usage & 3) << 30);
2596 	u64 out_param;
2597 	int err;
2598 
2599 	if (mlx4_is_mfunc(dev)) {
2600 		err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
2601 				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2602 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2603 		if (!err)
2604 			*idx = get_param_l(&out_param);
2605 		if (WARN_ON(err == -ENOSPC))
2606 			err = -EINVAL;
2607 		return err;
2608 	}
2609 	return __mlx4_counter_alloc(dev, idx);
2610 }
2611 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2612 
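/*
 * Reset a counter's hardware statistics by issuing QUERY_IF_STAT with
 * the reset flag set in the input modifier.
 */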
2613 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2614 				u8 counter_index)
2615 {
2616 	struct mlx4_cmd_mailbox *if_stat_mailbox;
2617 	int err;
2618 	u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
2619 
2620 	if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2621 	if (IS_ERR(if_stat_mailbox))
2622 		return PTR_ERR(if_stat_mailbox);
2623 
2624 	err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2625 			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2626 			   MLX4_CMD_NATIVE);
2627 
2628 	mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2629 	return err;
2630 }
2631 
2632 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2633 {
2634 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2635 		return;
2636 
2637 	if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2638 		return;
2639 
2640 	__mlx4_clear_if_stat(dev, idx);
2641 
2642 	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2644 }
2645 
2646 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2647 {
2648 	u64 in_param = 0;
2649 
2650 	if (mlx4_is_mfunc(dev)) {
2651 		set_param_l(&in_param, idx);
2652 		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2653 			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2654 			 MLX4_CMD_WRAPPED);
2655 		return;
2656 	}
2657 	__mlx4_counter_free(dev, idx);
2658 }
2659 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2660 
2661 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2662 {
2663 	struct mlx4_priv *priv = mlx4_priv(dev);
2664 
2665 	return priv->def_counter[port - 1];
2666 }
2667 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2668 
2669 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2670 {
2671 	struct mlx4_priv *priv = mlx4_priv(dev);
2672 
2673 	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2674 }
2675 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2676 
2677 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2678 {
2679 	struct mlx4_priv *priv = mlx4_priv(dev);
2680 
2681 	return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2682 }
2683 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2684 
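/*
 * Generate a random admin GUID for a VF port. Entry 0 is the HW GUID
 * and is left untouched. In the resulting big-endian EUI-64 value,
 * clear the group/multicast bit (bit 56) and set the
 * locally-administered bit (bit 57).
 */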
2685 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2686 {
2687 	struct mlx4_priv *priv = mlx4_priv(dev);
2688 	__be64 guid;
2689 
2690 	/* hw GUID */
2691 	if (entry == 0)
2692 		return;
2693 
2694 	get_random_bytes((char *)&guid, sizeof(guid));
2695 	guid &= ~(cpu_to_be64(1ULL << 56));
2696 	guid |= cpu_to_be64(1ULL << 57);
2697 	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2698 }
2699 
2700 static int mlx4_setup_hca(struct mlx4_dev *dev)
2701 {
2702 	struct mlx4_priv *priv = mlx4_priv(dev);
2703 	int err;
2704 	int port;
2705 	__be32 ib_port_default_caps;
2706 
2707 	err = mlx4_init_uar_table(dev);
2708 	if (err) {
2709 		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2710 		return err;
2711 	}
2712 
2713 	err = mlx4_uar_alloc(dev, &priv->driver_uar);
2714 	if (err) {
2715 		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2716 		goto err_uar_table_free;
2717 	}
2718 
2719 	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2720 	if (!priv->kar) {
2721 		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2722 		err = -ENOMEM;
2723 		goto err_uar_free;
2724 	}
2725 
2726 	err = mlx4_init_pd_table(dev);
2727 	if (err) {
2728 		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2729 		goto err_kar_unmap;
2730 	}
2731 
2732 	err = mlx4_init_xrcd_table(dev);
2733 	if (err) {
2734 		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2735 		goto err_pd_table_free;
2736 	}
2737 
2738 	err = mlx4_init_mr_table(dev);
2739 	if (err) {
2740 		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2741 		goto err_xrcd_table_free;
2742 	}
2743 
2744 	if (!mlx4_is_slave(dev)) {
2745 		err = mlx4_init_mcg_table(dev);
2746 		if (err) {
2747 			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2748 			goto err_mr_table_free;
2749 		}
2750 		err = mlx4_config_mad_demux(dev);
2751 		if (err) {
2752 			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2753 			goto err_mcg_table_free;
2754 		}
2755 	}
2756 
2757 	err = mlx4_init_eq_table(dev);
2758 	if (err) {
2759 		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2760 		goto err_mcg_table_free;
2761 	}
2762 
2763 	err = mlx4_cmd_use_events(dev);
2764 	if (err) {
2765 		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2766 		goto err_eq_table_free;
2767 	}
2768 
2769 	err = mlx4_NOP(dev);
2770 	if (err) {
2771 		if (dev->flags & MLX4_FLAG_MSI_X) {
2772 			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
2773 				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2774 			mlx4_warn(dev, "Trying again without MSI-X\n");
2775 		} else {
2776 			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2777 				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2778 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2779 		}
2780 
2781 		goto err_cmd_poll;
2782 	}
2783 
2784 	mlx4_dbg(dev, "NOP command IRQ test passed\n");
2785 
2786 	err = mlx4_init_cq_table(dev);
2787 	if (err) {
2788 		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2789 		goto err_cmd_poll;
2790 	}
2791 
2792 	err = mlx4_init_srq_table(dev);
2793 	if (err) {
2794 		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2795 		goto err_cq_table_free;
2796 	}
2797 
2798 	err = mlx4_init_qp_table(dev);
2799 	if (err) {
2800 		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2801 		goto err_srq_table_free;
2802 	}
2803 
2804 	if (!mlx4_is_slave(dev)) {
2805 		err = mlx4_init_counters_table(dev);
2806 		if (err && err != -ENOENT) {
2807 			mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2808 			goto err_qp_table_free;
2809 		}
2810 	}
2811 
2812 	err = mlx4_allocate_default_counters(dev);
2813 	if (err) {
2814 		mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2815 		goto err_counters_table_free;
2816 	}
2817 
2818 	if (!mlx4_is_slave(dev)) {
2819 		for (port = 1; port <= dev->caps.num_ports; port++) {
2820 			ib_port_default_caps = 0;
2821 			err = mlx4_get_port_ib_caps(dev, port,
2822 						    &ib_port_default_caps);
2823 			if (err)
2824 				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2825 					  port, err);
2826 			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2827 
2828 			/* initialize per-slave default ib port capabilities */
2829 			if (mlx4_is_master(dev)) {
2830 				int i;
2831 				for (i = 0; i < dev->num_slaves; i++) {
2832 					if (i == mlx4_master_func_num(dev))
2833 						continue;
2834 					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2835 						ib_port_default_caps;
2836 				}
2837 			}
2838 
2839 			if (mlx4_is_mfunc(dev))
2840 				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2841 			else
2842 				dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2843 
2844 			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2845 					    dev->caps.pkey_table_len[port] : -1);
2846 			if (err) {
2847 				mlx4_err(dev, "Failed to set port %d, aborting\n",
2848 					 port);
2849 				goto err_default_counters_free;
2850 			}
2851 		}
2852 	}
2853 
2854 	return 0;
2855 
2856 err_default_counters_free:
2857 	mlx4_cleanup_default_counters(dev);
2858 
2859 err_counters_table_free:
2860 	if (!mlx4_is_slave(dev))
2861 		mlx4_cleanup_counters_table(dev);
2862 
2863 err_qp_table_free:
2864 	mlx4_cleanup_qp_table(dev);
2865 
2866 err_srq_table_free:
2867 	mlx4_cleanup_srq_table(dev);
2868 
2869 err_cq_table_free:
2870 	mlx4_cleanup_cq_table(dev);
2871 
2872 err_cmd_poll:
2873 	mlx4_cmd_use_polling(dev);
2874 
2875 err_eq_table_free:
2876 	mlx4_cleanup_eq_table(dev);
2877 
2878 err_mcg_table_free:
2879 	if (!mlx4_is_slave(dev))
2880 		mlx4_cleanup_mcg_table(dev);
2881 
2882 err_mr_table_free:
2883 	mlx4_cleanup_mr_table(dev);
2884 
2885 err_xrcd_table_free:
2886 	mlx4_cleanup_xrcd_table(dev);
2887 
2888 err_pd_table_free:
2889 	mlx4_cleanup_pd_table(dev);
2890 
2891 err_kar_unmap:
2892 	iounmap(priv->kar);
2893 
2894 err_uar_free:
2895 	mlx4_uar_free(dev, &priv->driver_uar);
2896 
2897 err_uar_table_free:
2898 	mlx4_cleanup_uar_table(dev);
2899 	return err;
2900 }
2901 
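/*
 * Set an IRQ affinity hint for a completion EQ. The target CPU is the
 * EQ's index within its port: subtract the EQs belonging to earlier
 * ports and the slot taken by the async EQ. A negative result means
 * the EQs are shared between ports and the hint was already set when
 * the first port was processed.
 */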
2902 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2903 {
2904 	int requested_cpu = 0;
2905 	struct mlx4_priv *priv = mlx4_priv(dev);
2906 	struct mlx4_eq *eq;
2907 	int off = 0;
2908 	int i;
2909 
2910 	if (eqn > dev->caps.num_comp_vectors)
2911 		return -EINVAL;
2912 
2913 	for (i = 1; i < port; i++)
2914 		off += mlx4_get_eqs_per_port(dev, i);
2915 
2916 	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
2917 
2918 	/* Meaning EQs are shared, and this call comes from the second port */
2919 	if (requested_cpu < 0)
2920 		return 0;
2921 
2922 	eq = &priv->eq_table.eq[eqn];
2923 
2924 	if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
2925 		return -ENOMEM;
2926 
2927 	cpumask_set_cpu(requested_cpu, eq->affinity_mask);
2928 
2929 	return 0;
2930 }
2931 
2932 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2933 {
2934 	struct mlx4_priv *priv = mlx4_priv(dev);
2935 	struct msix_entry *entries;
2936 	int i;
2937 	int port = 0;
2938 
2939 	if (msi_x) {
2940 		int nreq = min3(dev->caps.num_ports *
2941 				(int)num_online_cpus() + 1,
2942 				dev->caps.num_eqs - dev->caps.reserved_eqs,
2943 				MAX_MSIX);
2944 
2945 		if (msi_x > 1)
2946 			nreq = min_t(int, nreq, msi_x);
2947 
2948 		entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
2949 		if (!entries)
2950 			goto no_msi;
2951 
2952 		for (i = 0; i < nreq; ++i)
2953 			entries[i].entry = i;
2954 
2955 		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2956 					     nreq);
2957 
2958 		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
2959 			kfree(entries);
2960 			goto no_msi;
2961 		}
2962 		/* 1 is reserved for events (asynchronous EQ) */
2963 		dev->caps.num_comp_vectors = nreq - 1;
2964 
2965 		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2966 		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2967 			    dev->caps.num_ports);
2968 
2969 		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2970 			if (i == MLX4_EQ_ASYNC)
2971 				continue;
2972 
2973 			priv->eq_table.eq[i].irq =
2974 				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2975 
2976 			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2977 				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2978 					    dev->caps.num_ports);
2979 				/* We don't set affinity hint when there
2980 				 * aren't enough EQs
2981 				 */
2982 			} else {
2983 				set_bit(port,
2984 					priv->eq_table.eq[i].actv_ports.ports);
2985 				if (mlx4_init_affinity_hint(dev, port + 1, i))
2986 					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
2987 						  i);
2988 			}
2989 			/* We divide the EQs evenly between the two ports.
2990 			 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
2991 			 * is the number of EQs per port
2992 			 * (i.e. eqs_per_port). Theoretically, we would like
2993 			 * to write something like (i + 1) % eqs_per_port == 0.
2994 			 * However, since there's an asynchronous EQ, we have
2995 			 * to skip over it by comparing this condition to
2996 			 * !!((i + 1) > MLX4_EQ_ASYNC).
2997 			 */
2998 			if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
2999 			    ((i + 1) %
3000 			     (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
3001 			    !!((i + 1) > MLX4_EQ_ASYNC))
3002 				/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
3003 				 * everything is shared anyway.
3004 				 */
3005 				port++;
3006 		}
3007 
3008 		dev->flags |= MLX4_FLAG_MSI_X;
3009 
3010 		kfree(entries);
3011 		return;
3012 	}
3013 
3014 no_msi:
3015 	dev->caps.num_comp_vectors = 1;
3016 
3017 	BUG_ON(MLX4_EQ_ASYNC >= 2);
3018 	for (i = 0; i < 2; ++i) {
3019 		priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
3020 		if (i != MLX4_EQ_ASYNC) {
3021 			bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
3022 				    dev->caps.num_ports);
3023 		}
3024 	}
3025 }
3026 
3027 static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
3028 				      enum devlink_port_type port_type)
3029 {
3030 	struct mlx4_port_info *info = container_of(devlink_port,
3031 						   struct mlx4_port_info,
3032 						   devlink_port);
3033 	enum mlx4_port_type mlx4_port_type;
3034 
3035 	switch (port_type) {
3036 	case DEVLINK_PORT_TYPE_AUTO:
3037 		mlx4_port_type = MLX4_PORT_TYPE_AUTO;
3038 		break;
3039 	case DEVLINK_PORT_TYPE_ETH:
3040 		mlx4_port_type = MLX4_PORT_TYPE_ETH;
3041 		break;
3042 	case DEVLINK_PORT_TYPE_IB:
3043 		mlx4_port_type = MLX4_PORT_TYPE_IB;
3044 		break;
3045 	default:
3046 		return -EOPNOTSUPP;
3047 	}
3048 
3049 	return __set_port_type(info, mlx4_port_type);
3050 }
3051 
3052 static const struct devlink_port_ops mlx4_devlink_port_ops = {
3053 	.port_type_set = mlx4_devlink_port_type_set,
3054 };
3055 
3056 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
3057 {
3058 	struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
3059 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
3060 	int err;
3061 
3062 	err = devl_port_register_with_ops(devlink, &info->devlink_port, port,
3063 					  &mlx4_devlink_port_ops);
3064 	if (err)
3065 		return err;
3066 
3067 	/* Ethernet and IB drivers will normally set the port type,
3068 	 * but if they are not built set the type now to prevent
3069 	 * devlink_port_type_warn() from firing.
3070 	 */
3071 	if (!IS_ENABLED(CONFIG_MLX4_EN) &&
3072 	    dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
3073 		devlink_port_type_eth_set(&info->devlink_port);
3074 	else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) &&
3075 		 dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
3076 		devlink_port_type_ib_set(&info->devlink_port, NULL);
3077 
3078 	info->dev = dev;
3079 	info->port = port;
3080 	if (!mlx4_is_slave(dev)) {
3081 		mlx4_init_mac_table(dev, &info->mac_table);
3082 		mlx4_init_vlan_table(dev, &info->vlan_table);
3083 		mlx4_init_roce_gid_table(dev, &info->gid_table);
3084 		info->base_qpn = mlx4_get_base_qpn(dev, port);
3085 	}
3086 
3087 	sprintf(info->dev_name, "mlx4_port%d", port);
3088 	info->port_attr.attr.name = info->dev_name;
3089 	if (mlx4_is_mfunc(dev)) {
3090 		info->port_attr.attr.mode = 0444;
3091 	} else {
3092 		info->port_attr.attr.mode = 0644;
3093 		info->port_attr.store     = set_port_type;
3094 	}
3095 	info->port_attr.show      = show_port_type;
3096 	sysfs_attr_init(&info->port_attr.attr);
3097 
3098 	err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
3099 	if (err) {
3100 		mlx4_err(dev, "Failed to create file for port %d\n", port);
3101 		devlink_port_type_clear(&info->devlink_port);
3102 		devl_port_unregister(&info->devlink_port);
3103 		info->port = -1;
3104 		return err;
3105 	}
3106 
3107 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
3108 	info->port_mtu_attr.attr.name = info->dev_mtu_name;
3109 	if (mlx4_is_mfunc(dev)) {
3110 		info->port_mtu_attr.attr.mode = 0444;
3111 	} else {
3112 		info->port_mtu_attr.attr.mode = 0644;
3113 		info->port_mtu_attr.store     = set_port_ib_mtu;
3114 	}
3115 	info->port_mtu_attr.show      = show_port_ib_mtu;
3116 	sysfs_attr_init(&info->port_mtu_attr.attr);
3117 
3118 	err = device_create_file(&dev->persist->pdev->dev,
3119 				 &info->port_mtu_attr);
3120 	if (err) {
3121 		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
3122 		device_remove_file(&info->dev->persist->pdev->dev,
3123 				   &info->port_attr);
3124 		devlink_port_type_clear(&info->devlink_port);
3125 		devl_port_unregister(&info->devlink_port);
3126 		info->port = -1;
3127 		return err;
3128 	}
3129 
3130 	return 0;
3131 }
3132 
3133 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
3134 {
3135 	if (info->port < 0)
3136 		return;
3137 
3138 	device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
3139 	device_remove_file(&info->dev->persist->pdev->dev,
3140 			   &info->port_mtu_attr);
3141 	devlink_port_type_clear(&info->devlink_port);
3142 	devl_port_unregister(&info->devlink_port);
3143 
3144 #ifdef CONFIG_RFS_ACCEL
3145 	free_irq_cpu_rmap(info->rmap);
3146 	info->rmap = NULL;
3147 #endif
3148 }
3149 
3150 static int mlx4_init_steering(struct mlx4_dev *dev)
3151 {
3152 	struct mlx4_priv *priv = mlx4_priv(dev);
3153 	int num_entries = dev->caps.num_ports;
3154 	int i, j;
3155 
3156 	priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
3157 			      GFP_KERNEL);
3158 	if (!priv->steer)
3159 		return -ENOMEM;
3160 
3161 	for (i = 0; i < num_entries; i++)
3162 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
3163 			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
3164 			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
3165 		}
3166 	return 0;
3167 }
3168 
3169 static void mlx4_clear_steering(struct mlx4_dev *dev)
3170 {
3171 	struct mlx4_priv *priv = mlx4_priv(dev);
3172 	struct mlx4_steer_index *entry, *tmp_entry;
3173 	struct mlx4_promisc_qp *pqp, *tmp_pqp;
3174 	int num_entries = dev->caps.num_ports;
3175 	int i, j;
3176 
3177 	for (i = 0; i < num_entries; i++) {
3178 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
3179 			list_for_each_entry_safe(pqp, tmp_pqp,
3180 						 &priv->steer[i].promisc_qps[j],
3181 						 list) {
3182 				list_del(&pqp->list);
3183 				kfree(pqp);
3184 			}
3185 			list_for_each_entry_safe(entry, tmp_entry,
3186 						 &priv->steer[i].steer_entries[j],
3187 						 list) {
3188 				list_del(&entry->list);
3189 				list_for_each_entry_safe(pqp, tmp_pqp,
3190 							 &entry->duplicates,
3191 							 list) {
3192 					list_del(&pqp->list);
3193 					kfree(pqp);
3194 				}
3195 				kfree(entry);
3196 			}
3197 		}
3198 	}
3199 	kfree(priv->steer);
3200 }
3201 
3202 static int extended_func_num(struct pci_dev *pdev)
3203 {
3204 	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
3205 }
3206 
3207 #define MLX4_OWNER_BASE	0x8069c
3208 #define MLX4_OWNER_SIZE	4
3209 
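/*
 * Try to claim exclusive ownership of the device through the HW
 * ownership semaphore at MLX4_OWNER_BASE in BAR 0. The read itself
 * appears to claim the semaphore: it returns 0 when ownership is
 * acquired, and non-zero when another function already owns the
 * device. Ownership is released by writing 0 back, as done in
 * mlx4_free_ownership() below.
 */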
3210 static int mlx4_get_ownership(struct mlx4_dev *dev)
3211 {
3212 	void __iomem *owner;
3213 	u32 ret;
3214 
3215 	if (pci_channel_offline(dev->persist->pdev))
3216 		return -EIO;
3217 
3218 	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3219 			MLX4_OWNER_BASE,
3220 			MLX4_OWNER_SIZE);
3221 	if (!owner) {
3222 		mlx4_err(dev, "Failed to obtain ownership bit\n");
3223 		return -ENOMEM;
3224 	}
3225 
3226 	ret = readl(owner);
3227 	iounmap(owner);
3228 	return (int) !!ret;
3229 }
3230 
3231 static void mlx4_free_ownership(struct mlx4_dev *dev)
3232 {
3233 	void __iomem *owner;
3234 
3235 	if (pci_channel_offline(dev->persist->pdev))
3236 		return;
3237 
3238 	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3239 			MLX4_OWNER_BASE,
3240 			MLX4_OWNER_SIZE);
3241 	if (!owner) {
3242 		mlx4_err(dev, "Failed to obtain ownership bit\n");
3243 		return;
3244 	}
3245 	writel(0, owner);
3246 	msleep(1000);
3247 	iounmap(owner);
3248 }
3249 
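/*
 * A flags combination is SR-IOV-valid only when the SRIOV and MASTER
 * flags agree: a function that enabled SR-IOV must be the master, and
 * the master must have SR-IOV enabled.
 */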
3250 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV)	==\
3251 				  !!((flags) & MLX4_FLAG_MASTER))
3252 
3253 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
3254 			     u8 total_vfs, int existing_vfs, int reset_flow)
3255 {
3256 	u64 dev_flags = dev->flags;
3257 	int err = 0;
3258 	int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
3259 					MLX4_MAX_NUM_VF);
3260 
3261 	if (reset_flow) {
3262 		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
3263 				       GFP_KERNEL);
3264 		if (!dev->dev_vfs)
3265 			goto free_mem;
3266 		return dev_flags;
3267 	}
3268 
3269 	atomic_inc(&pf_loading);
3270 	if (dev->flags &  MLX4_FLAG_SRIOV) {
3271 		if (existing_vfs != total_vfs) {
3272 			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
3273 				 existing_vfs, total_vfs);
3274 			total_vfs = existing_vfs;
3275 		}
3276 	}
3277 
3278 	dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
3279 	if (!dev->dev_vfs) {
3280 		mlx4_err(dev, "Failed to allocate memory for VFs\n");
3281 		goto disable_sriov;
3282 	}
3283 
3284 	if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
3285 		if (total_vfs > fw_enabled_sriov_vfs) {
3286 			mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR-IOV\n",
3287 				 total_vfs, fw_enabled_sriov_vfs);
3288 			err = -ENOMEM;
3289 			goto disable_sriov;
3290 		}
3291 		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
3292 		err = pci_enable_sriov(pdev, total_vfs);
3293 	}
3294 	if (err) {
3295 		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
3296 			 err);
3297 		goto disable_sriov;
3298 	} else {
3299 		mlx4_warn(dev, "Running in master mode\n");
3300 		dev_flags |= MLX4_FLAG_SRIOV |
3301 			MLX4_FLAG_MASTER;
3302 		dev_flags &= ~MLX4_FLAG_SLAVE;
3303 		dev->persist->num_vfs = total_vfs;
3304 	}
3305 	return dev_flags;
3306 
3307 disable_sriov:
3308 	atomic_dec(&pf_loading);
3309 free_mem:
3310 	dev->persist->num_vfs = 0;
3311 	kfree(dev->dev_vfs);
3312 	dev->dev_vfs = NULL;
3313 	return dev_flags & ~MLX4_FLAG_MASTER;
3314 }
3315 
3316 enum {
3317 	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
3318 };
3319 
3320 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
3321 			      int *nvfs)
3322 {
3323 	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
3324 	/* 64 VFs is a limitation of ConnectX-2 (CX2) */
3325 	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
3326 	    requested_vfs >= 64) {
3327 		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3328 			 requested_vfs);
3329 		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
3330 	}
3331 	return 0;
3332 }
3333 
3334 static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3335 {
3336 	struct pci_dev *pdev = dev->persist->pdev;
3337 	int err = 0;
3338 
3339 	mutex_lock(&dev->persist->pci_status_mutex);
3340 	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3341 		err = pci_enable_device(pdev);
3342 		if (!err)
3343 			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3344 	}
3345 	mutex_unlock(&dev->persist->pci_status_mutex);
3346 
3347 	return err;
3348 }
3349 
3350 static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3351 {
3352 	struct pci_dev *pdev = dev->persist->pdev;
3353 
3354 	mutex_lock(&dev->persist->pci_status_mutex);
3355 	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3356 		pci_disable_device(pdev);
3357 		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3358 	}
3359 	mutex_unlock(&dev->persist->pci_status_mutex);
3360 }
3361 
3362 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3363 			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3364 			 int reset_flow)
3365 {
3366 	struct devlink *devlink = priv_to_devlink(priv);
3367 	struct mlx4_dev *dev;
3368 	unsigned sum = 0;
3369 	int err;
3370 	int port;
3371 	int i;
3372 	struct mlx4_dev_cap *dev_cap = NULL;
3373 	int existing_vfs = 0;
3374 
3375 	devl_assert_locked(devlink);
3376 	dev = &priv->dev;
3377 
3378 	INIT_LIST_HEAD(&priv->ctx_list);
3379 	spin_lock_init(&priv->ctx_lock);
3380 
3381 	mutex_init(&priv->port_mutex);
3382 	mutex_init(&priv->bond_mutex);
3383 
3384 	INIT_LIST_HEAD(&priv->pgdir_list);
3385 	mutex_init(&priv->pgdir_mutex);
3386 	spin_lock_init(&priv->cmd.context_lock);
3387 
3388 	INIT_LIST_HEAD(&priv->bf_list);
3389 	mutex_init(&priv->bf_mutex);
3390 
3391 	dev->rev_id = pdev->revision;
3392 	dev->numa_node = dev_to_node(&pdev->dev);
3393 
3394 	/* Detect if this device is a virtual function */
3395 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3396 		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3397 		dev->flags |= MLX4_FLAG_SLAVE;
3398 	} else {
3399 		/* We reset the device and enable SR-IOV only for physical
3400 		 * devices. Try to claim ownership of the device; if it is
3401 		 * already taken, skip -- do not allow multiple PFs. */
3402 		err = mlx4_get_ownership(dev);
3403 		if (err) {
3404 			if (err < 0)
3405 				return err;
3406 			else {
3407 				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3408 				return -EINVAL;
3409 			}
3410 		}
3411 
3412 		atomic_set(&priv->opreq_count, 0);
3413 		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3414 
3415 		/*
3416 		 * Now reset the HCA before we touch the PCI capabilities or
3417 		 * attempt a firmware command, since a boot ROM may have left
3418 		 * the HCA in an undefined state.
3419 		 */
3420 		err = mlx4_reset(dev);
3421 		if (err) {
3422 			mlx4_err(dev, "Failed to reset HCA, aborting\n");
3423 			goto err_sriov;
3424 		}
3425 
3426 		if (total_vfs) {
3427 			dev->flags = MLX4_FLAG_MASTER;
3428 			existing_vfs = pci_num_vf(pdev);
3429 			if (existing_vfs)
3430 				dev->flags |= MLX4_FLAG_SRIOV;
3431 			dev->persist->num_vfs = total_vfs;
3432 		}
3433 	}
3434 
3435 	/* On load, remove any previous indication of internal error;
3436 	 * the device is up.
3437 	 */
3438 	dev->persist->state = MLX4_DEVICE_STATE_UP;
3439 
3440 slave_start:
3441 	err = mlx4_cmd_init(dev);
3442 	if (err) {
3443 		mlx4_err(dev, "Failed to init command interface, aborting\n");
3444 		goto err_sriov;
3445 	}
3446 
3447 	/* In slave functions, the communication channel must be initialized
3448 	 * before posting commands. Also, initialize num_slaves before
3449 	 * calling mlx4_init_hca. */
3450 	if (mlx4_is_mfunc(dev)) {
3451 		if (mlx4_is_master(dev)) {
3452 			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3453 
3454 		} else {
3455 			dev->num_slaves = 0;
3456 			err = mlx4_multi_func_init(dev);
3457 			if (err) {
3458 				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3459 				goto err_cmd;
3460 			}
3461 		}
3462 	}
3463 
3464 	err = mlx4_init_fw(dev);
3465 	if (err) {
3466 		mlx4_err(dev, "Failed to init fw, aborting.\n");
3467 		goto err_mfunc;
3468 	}
3469 
3470 	if (mlx4_is_master(dev)) {
3471 		/* When we hit the goto slave_start below, dev_cap is already initialized. */
3472 		if (!dev_cap) {
3473 			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3474 
3475 			if (!dev_cap) {
3476 				err = -ENOMEM;
3477 				goto err_fw;
3478 			}
3479 
3480 			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3481 			if (err) {
3482 				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3483 				goto err_fw;
3484 			}
3485 
3486 			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3487 				goto err_fw;
3488 
3489 			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
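				/* Legacy FW (no SYS_EQS): SR-IOV must be
				 * enabled before QUERY_DEV_CAP reports the
				 * final limits, so enable it now, tear down
				 * the FW/command state, reset the HCA and
				 * restart from slave_start.
				 */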
3490 				u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3491 								  total_vfs,
3492 								  existing_vfs,
3493 								  reset_flow);
3494 
3495 				mlx4_close_fw(dev);
3496 				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3497 				dev->flags = dev_flags;
3498 				if (!SRIOV_VALID_STATE(dev->flags)) {
3499 					mlx4_err(dev, "Invalid SRIOV state\n");
3500 					goto err_sriov;
3501 				}
3502 				err = mlx4_reset(dev);
3503 				if (err) {
3504 					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3505 					goto err_sriov;
3506 				}
3507 				goto slave_start;
3508 			}
3509 		} else {
3510 			/* Legacy mode FW requires SRIOV to be enabled before
3511 			 * doing QUERY_DEV_CAP, since max_eq's value is different if
3512 			 * SRIOV is enabled.
3513 			 */
3514 			memset(dev_cap, 0, sizeof(*dev_cap));
3515 			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3516 			if (err) {
3517 				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3518 				goto err_fw;
3519 			}
3520 
3521 			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3522 				goto err_fw;
3523 		}
3524 	}
3525 
3526 	err = mlx4_init_hca(dev);
3527 	if (err) {
3528 		if (err == -EACCES) {
			/* Not the primary physical function; fall back to
			 * running in slave mode.
			 */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF, so undo any SR-IOV/master setup. */
3533 			if (dev->flags & MLX4_FLAG_SRIOV) {
3534 				if (!existing_vfs)
3535 					pci_disable_sriov(pdev);
3536 				if (mlx4_is_master(dev) && !reset_flow)
3537 					atomic_dec(&pf_loading);
3538 				dev->flags &= ~MLX4_FLAG_SRIOV;
3539 			}
3540 			if (!mlx4_is_slave(dev))
3541 				mlx4_free_ownership(dev);
3542 			dev->flags |= MLX4_FLAG_SLAVE;
3543 			dev->flags &= ~MLX4_FLAG_MASTER;
3544 			goto slave_start;
3545 		} else
3546 			goto err_fw;
3547 	}
3548 
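	/* With SYS_EQS firmware, SR-IOV is enabled only after the HCA is
	 * initialized. If enabling SR-IOV changed our master/slave role,
	 * reinitialize the command interface to match the new role.
	 */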
3549 	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3550 		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3551 						  existing_vfs, reset_flow);
3552 
3553 		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3554 			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3555 			dev->flags = dev_flags;
3556 			err = mlx4_cmd_init(dev);
3557 			if (err) {
				/* Only the VHCR was cleaned up, so we could
				 * still send FW commands.
				 */
3561 				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3562 				goto err_close;
3563 			}
3564 		} else {
3565 			dev->flags = dev_flags;
3566 		}
3567 
3568 		if (!SRIOV_VALID_STATE(dev->flags)) {
3569 			mlx4_err(dev, "Invalid SRIOV state\n");
3570 			err = -EINVAL;
3571 			goto err_close;
3572 		}
3573 	}
3574 
	/* Check whether the device is functioning at its maximum possible
	 * speed. This call has no return code; it just warns the user if the
	 * PCI Express device's capabilities are not fully satisfied by the bus.
	 */
3579 	if (!mlx4_is_slave(dev))
3580 		pcie_print_link_status(dev->persist->pdev);
3581 
3584 	if (mlx4_is_master(dev)) {
3585 		if (dev->caps.num_ports < 2 &&
3586 		    num_vfs_argc > 1) {
3587 			err = -EINVAL;
3588 			mlx4_err(dev,
3589 				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3590 				 dev->caps.num_ports);
3591 			goto err_close;
3592 		}
3593 		memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3594 
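		/* Build the per-VF port-span table: nvfs[0] and nvfs[1] hold
		 * single-port VFs bound to port 1 and port 2 respectively,
		 * while nvfs[2] holds dual-port VFs spanning all ports.
		 */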
		for (i = 0; i < ARRAY_SIZE(dev->persist->nvfs); i++) {
			unsigned int j;
3599 
3600 			for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3601 				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3602 				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3603 					dev->caps.num_ports;
3604 			}
3605 		}
3606 
3607 		/* In master functions, the communication channel
3608 		 * must be initialized after obtaining its address from fw
3609 		 */
3610 		err = mlx4_multi_func_init(dev);
3611 		if (err) {
3612 			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3613 			goto err_close;
3614 		}
3615 	}
3616 
3617 	err = mlx4_alloc_eq_table(dev);
3618 	if (err)
3619 		goto err_master_mfunc;
3620 
3621 	bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
3622 	mutex_init(&priv->msix_ctl.pool_lock);
3623 
3624 	mlx4_enable_msi_x(dev);
	if (mlx4_is_mfunc(dev) && !(dev->flags & MLX4_FLAG_MSI_X)) {
3627 		err = -EOPNOTSUPP;
3628 		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3629 		goto err_free_eq;
3630 	}
3631 
3632 	if (!mlx4_is_slave(dev)) {
3633 		err = mlx4_init_steering(dev);
3634 		if (err)
3635 			goto err_disable_msix;
3636 	}
3637 
3638 	mlx4_init_quotas(dev);
3639 
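	/* If HCA setup fails with -EBUSY on a single-function device that is
	 * using MSI-X, fall back to INTx with a single completion vector and
	 * retry once.
	 */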
3640 	err = mlx4_setup_hca(dev);
3641 	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3642 	    !mlx4_is_mfunc(dev)) {
3643 		dev->flags &= ~MLX4_FLAG_MSI_X;
3644 		dev->caps.num_comp_vectors = 1;
3645 		pci_disable_msix(pdev);
3646 		err = mlx4_setup_hca(dev);
3647 	}
3648 
3649 	if (err)
3650 		goto err_steer;
3651 
	/* Once the PF's resources are ready, arm its comm channel so it can
	 * start receiving commands from the slaves.
	 */
3655 	if (mlx4_is_master(dev)) {
3656 		err = mlx4_ARM_COMM_CHANNEL(dev);
3657 		if (err) {
3658 			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
3659 				 err);
3660 			goto err_steer;
3661 		}
3662 	}
3663 
3664 	for (port = 1; port <= dev->caps.num_ports; port++) {
3665 		err = mlx4_init_port_info(dev, port);
3666 		if (err)
3667 			goto err_port;
3668 	}
3669 
3670 	priv->v2p.port1 = 1;
3671 	priv->v2p.port2 = 2;
3672 
3673 	err = mlx4_register_device(dev);
3674 	if (err)
3675 		goto err_port;
3676 
3677 	mlx4_request_modules(dev);
3678 
3679 	mlx4_sense_init(dev);
3680 	mlx4_start_sense(dev);
3681 
3682 	priv->removed = 0;
3683 
3684 	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3685 		atomic_dec(&pf_loading);
3686 
3687 	kfree(dev_cap);
3688 	return 0;
3689 
3690 err_port:
3691 	for (--port; port >= 1; --port)
3692 		mlx4_cleanup_port_info(&priv->port[port]);
3693 
3694 	mlx4_cleanup_default_counters(dev);
3695 	if (!mlx4_is_slave(dev))
3696 		mlx4_cleanup_counters_table(dev);
3697 	mlx4_cleanup_qp_table(dev);
3698 	mlx4_cleanup_srq_table(dev);
3699 	mlx4_cleanup_cq_table(dev);
3700 	mlx4_cmd_use_polling(dev);
3701 	mlx4_cleanup_eq_table(dev);
3702 	mlx4_cleanup_mcg_table(dev);
3703 	mlx4_cleanup_mr_table(dev);
3704 	mlx4_cleanup_xrcd_table(dev);
3705 	mlx4_cleanup_pd_table(dev);
3706 	mlx4_cleanup_uar_table(dev);
3707 
3708 err_steer:
3709 	if (!mlx4_is_slave(dev))
3710 		mlx4_clear_steering(dev);
3711 
3712 err_disable_msix:
3713 	if (dev->flags & MLX4_FLAG_MSI_X)
3714 		pci_disable_msix(pdev);
3715 
3716 err_free_eq:
3717 	mlx4_free_eq_table(dev);
3718 
3719 err_master_mfunc:
3720 	if (mlx4_is_master(dev)) {
3721 		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3722 		mlx4_multi_func_cleanup(dev);
3723 	}
3724 
3725 	if (mlx4_is_slave(dev))
3726 		mlx4_slave_destroy_special_qp_cap(dev);
3727 
3728 err_close:
3729 	mlx4_close_hca(dev);
3730 
3731 err_fw:
3732 	mlx4_close_fw(dev);
3733 
3734 err_mfunc:
3735 	if (mlx4_is_slave(dev))
3736 		mlx4_multi_func_cleanup(dev);
3737 
3738 err_cmd:
3739 	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3740 
3741 err_sriov:
3742 	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3743 		pci_disable_sriov(pdev);
3744 		dev->flags &= ~MLX4_FLAG_SRIOV;
3745 	}
3746 
3747 	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3748 		atomic_dec(&pf_loading);
3749 
3750 	kfree(priv->dev.dev_vfs);
3751 
3752 	if (!mlx4_is_slave(dev))
3753 		mlx4_free_ownership(dev);
3754 
3755 	kfree(dev_cap);
3756 	return err;
3757 }
3758 
3759 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3760 			   struct mlx4_priv *priv)
3761 {
3762 	int err;
3763 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3764 	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3765 	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3766 		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned int total_vfs = 0;
3768 	unsigned int i;
3769 
3770 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3771 
3772 	err = mlx4_pci_enable_device(&priv->dev);
3773 	if (err) {
3774 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3775 		return err;
3776 	}
3777 
	/* Due to the requirement that all VFs and the PF are *guaranteed* two
	 * MACs per port, we must limit the number of VFs to 63 (since there
	 * are 128 MACs).
	 */
3782 	for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc;
3783 	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3784 		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3785 		if (nvfs[i] < 0) {
3786 			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3787 			err = -EINVAL;
3788 			goto err_disable_pdev;
3789 		}
3790 	}
3791 	for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc;
3792 	     i++) {
3793 		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3794 		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3795 			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3796 			err = -EINVAL;
3797 			goto err_disable_pdev;
3798 		}
3799 	}
3800 	if (total_vfs > MLX4_MAX_NUM_VF) {
3801 		dev_err(&pdev->dev,
3802 			"Requested more VF's (%d) than allowed by hw (%d)\n",
3803 			total_vfs, MLX4_MAX_NUM_VF);
3804 		err = -EINVAL;
3805 		goto err_disable_pdev;
3806 	}
3807 
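	/* Each physical port carries its single-port VFs (nvfs[i]) plus all
	 * dual-port VFs (nvfs[2]); check that sum against the per-port limit.
	 */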
3808 	for (i = 0; i < MLX4_MAX_PORTS; i++) {
3809 		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
3810 			dev_err(&pdev->dev,
3811 				"Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
3812 				nvfs[i] + nvfs[2], i + 1,
3813 				MLX4_MAX_NUM_VF_P_PORT);
3814 			err = -EINVAL;
3815 			goto err_disable_pdev;
3816 		}
3817 	}
3818 
3819 	/* Check for BARs. */
3820 	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3821 	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3822 		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3823 			pci_dev_data, pci_resource_flags(pdev, 0));
3824 		err = -ENODEV;
3825 		goto err_disable_pdev;
3826 	}
3827 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3828 		dev_err(&pdev->dev, "Missing UAR, aborting\n");
3829 		err = -ENODEV;
3830 		goto err_disable_pdev;
3831 	}
3832 
3833 	err = pci_request_regions(pdev, DRV_NAME);
3834 	if (err) {
3835 		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3836 		goto err_disable_pdev;
3837 	}
3838 
3839 	pci_set_master(pdev);
3840 
3841 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3842 	if (err) {
3843 		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3844 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3845 		if (err) {
3846 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3847 			goto err_release_regions;
3848 		}
3849 	}
3850 
3851 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
3852 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3853 	/* Detect if this device is a virtual function */
3854 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as PF, we normally skip probing VFs unless we
		 * were explicitly requested to probe them (via probe_vf).
		 */
3858 		if (total_vfs) {
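			/* Walk the nvfs[] buckets to locate the one holding
			 * this VF (identified by its extended function
			 * number); if its index within that bucket exceeds
			 * the probe_vf count, refuse to bind so the VF stays
			 * available for assignment elsewhere (e.g. to a
			 * guest).
			 */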
			unsigned int vfs_offset = 0;
3860 
3861 			for (i = 0; i < ARRAY_SIZE(nvfs) &&
3862 			     vfs_offset + nvfs[i] < extended_func_num(pdev);
3863 			     vfs_offset += nvfs[i], i++)
3864 				;
3865 			if (i == ARRAY_SIZE(nvfs)) {
3866 				err = -ENODEV;
3867 				goto err_release_regions;
3868 			}
3869 			if ((extended_func_num(pdev) - vfs_offset)
3870 			    > prb_vf[i]) {
3871 				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
3872 					 extended_func_num(pdev));
3873 				err = -ENODEV;
3874 				goto err_release_regions;
3875 			}
3876 		}
3877 	}
3878 
3879 	err = mlx4_crdump_init(&priv->dev);
3880 	if (err)
3881 		goto err_release_regions;
3882 
3883 	err = mlx4_catas_init(&priv->dev);
3884 	if (err)
3885 		goto err_crdump;
3886 
3887 	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3888 	if (err)
3889 		goto err_catas;
3890 
3891 	return 0;
3892 
3893 err_catas:
3894 	mlx4_catas_end(&priv->dev);
3895 
3896 err_crdump:
3897 	mlx4_crdump_end(&priv->dev);
3898 
3899 err_release_regions:
3900 	pci_release_regions(pdev);
3901 
3902 err_disable_pdev:
3903 	mlx4_pci_disable_device(&priv->dev);
3904 	return err;
3905 }
3906 
3907 static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
3908 {
3909 	struct mlx4_priv *priv = devlink_priv(devlink);
3910 	struct mlx4_dev *dev = &priv->dev;
3911 	struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
3912 	union devlink_param_value saved_value;
3913 	int err;
3914 
3915 	err = devl_param_driverinit_value_get(devlink,
3916 					      DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
3917 					      &saved_value);
3918 	if (!err && mlx4_internal_err_reset != saved_value.vbool) {
3919 		mlx4_internal_err_reset = saved_value.vbool;
		/* Notify if the value changed in runtime configuration mode */
3921 		devl_param_value_changed(devlink,
3922 					 DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
3923 	}
3924 	err = devl_param_driverinit_value_get(devlink,
3925 					      DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
3926 					      &saved_value);
3927 	if (!err)
3928 		log_num_mac = order_base_2(saved_value.vu32);
3929 	err = devl_param_driverinit_value_get(devlink,
3930 					      MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
3931 					      &saved_value);
3932 	if (!err)
3933 		enable_64b_cqe_eqe = saved_value.vbool;
3934 	err = devl_param_driverinit_value_get(devlink,
3935 					      MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
3936 					      &saved_value);
3937 	if (!err)
3938 		enable_4k_uar = saved_value.vbool;
3939 	err = devl_param_driverinit_value_get(devlink,
3940 					      DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
3941 					      &saved_value);
3942 	if (!err && crdump->snapshot_enable != saved_value.vbool) {
3943 		crdump->snapshot_enable = saved_value.vbool;
3944 		devl_param_value_changed(devlink,
3945 					 DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
3946 	}
3947 }
3948 
3949 static void mlx4_restart_one_down(struct pci_dev *pdev);
3950 static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
3951 			       struct devlink *devlink);
3952 
3953 static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
3954 				    enum devlink_reload_action action,
3955 				    enum devlink_reload_limit limit,
3956 				    struct netlink_ext_ack *extack)
3957 {
3958 	struct mlx4_priv *priv = devlink_priv(devlink);
3959 	struct mlx4_dev *dev = &priv->dev;
3960 	struct mlx4_dev_persistent *persist = dev->persist;
3961 
3962 	if (netns_change) {
3963 		NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
3964 		return -EOPNOTSUPP;
3965 	}
3966 	if (persist->num_vfs)
3967 		mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
3968 	mlx4_restart_one_down(persist->pdev);
3969 	return 0;
3970 }
3971 
3972 static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
3973 				  enum devlink_reload_limit limit, u32 *actions_performed,
3974 				  struct netlink_ext_ack *extack)
3975 {
3976 	struct mlx4_priv *priv = devlink_priv(devlink);
3977 	struct mlx4_dev *dev = &priv->dev;
3978 	struct mlx4_dev_persistent *persist = dev->persist;
3979 	int err;
3980 
3981 	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
3982 	err = mlx4_restart_one_up(persist->pdev, true, devlink);
3983 	if (err)
3984 		mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
3985 			 err);
3986 
3987 	return err;
3988 }
3989 
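/* These ops back the "devlink dev reload" command (for example
 * "devlink dev reload pci/0000:03:00.0", with a hypothetical PCI address):
 * reload tears the instance down via mlx4_restart_one_down() and brings it
 * back up with the driverinit devlink parameter values applied.
 */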
3990 static const struct devlink_ops mlx4_devlink_ops = {
3991 	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
3992 	.reload_down	= mlx4_devlink_reload_down,
3993 	.reload_up	= mlx4_devlink_reload_up,
3994 };
3995 
3996 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3997 {
3998 	struct devlink *devlink;
3999 	struct mlx4_priv *priv;
4000 	struct mlx4_dev *dev;
4001 	int ret;
4002 
4003 	printk_once(KERN_INFO "%s", mlx4_version);
4004 
4005 	devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
4006 	if (!devlink)
4007 		return -ENOMEM;
4008 	devl_lock(devlink);
4009 	priv = devlink_priv(devlink);
4010 
4011 	dev       = &priv->dev;
4012 	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
4013 	if (!dev->persist) {
4014 		ret = -ENOMEM;
4015 		goto err_devlink_free;
4016 	}
4017 	dev->persist->pdev = pdev;
4018 	dev->persist->dev = dev;
4019 	pci_set_drvdata(pdev, dev->persist);
4020 	priv->pci_dev_data = id->driver_data;
4021 	mutex_init(&dev->persist->device_state_mutex);
4022 	mutex_init(&dev->persist->interface_state_mutex);
4023 	mutex_init(&dev->persist->pci_status_mutex);
4024 
4025 	ret = devl_params_register(devlink, mlx4_devlink_params,
4026 				   ARRAY_SIZE(mlx4_devlink_params));
4027 	if (ret)
4028 		goto err_devlink_unregister;
4029 	mlx4_devlink_set_params_init_values(devlink);
	ret = __mlx4_init_one(pdev, id->driver_data, priv);
4031 	if (ret)
4032 		goto err_params_unregister;
4033 
4034 	pci_save_state(pdev);
4035 	devl_unlock(devlink);
4036 	devlink_register(devlink);
4037 	return 0;
4038 
4039 err_params_unregister:
4040 	devl_params_unregister(devlink, mlx4_devlink_params,
4041 			       ARRAY_SIZE(mlx4_devlink_params));
4042 err_devlink_unregister:
4043 	kfree(dev->persist);
4044 err_devlink_free:
4045 	devl_unlock(devlink);
4046 	devlink_free(devlink);
4047 	return ret;
4048 }
4049 
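/* Reset the driver-private state to pristine while preserving the
 * persistent info block and the flags in RESET_PERSIST_MASK_FLAGS, so that
 * a subsequent mlx4_load_one() starts from a clean slate.
 */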
4050 static void mlx4_clean_dev(struct mlx4_dev *dev)
4051 {
4052 	struct mlx4_dev_persistent *persist = dev->persist;
4053 	struct mlx4_priv *priv = mlx4_priv(dev);
4054 	unsigned long	flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
4055 
4056 	memset(priv, 0, sizeof(*priv));
4057 	priv->dev.persist = persist;
4058 	priv->dev.flags = flags;
4059 }
4060 
4061 static void mlx4_unload_one(struct pci_dev *pdev)
4062 {
4063 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4064 	struct mlx4_dev  *dev  = persist->dev;
4065 	struct mlx4_priv *priv = mlx4_priv(dev);
4066 	int               pci_dev_data;
4067 	struct devlink *devlink;
4068 	int p, i;
4069 
4070 	devlink = priv_to_devlink(priv);
4071 	devl_assert_locked(devlink);
4072 	if (priv->removed)
4073 		return;
4074 
	/* Save the current port types for later use. */
4076 	for (i = 0; i < dev->caps.num_ports; i++) {
4077 		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
4078 		dev->persist->curr_port_poss_type[i] = dev->caps.
4079 						       possible_type[i + 1];
4080 	}
4081 
4082 	pci_dev_data = priv->pci_dev_data;
4083 
4084 	mlx4_stop_sense(dev);
4085 	mlx4_unregister_device(dev);
4086 
4087 	for (p = 1; p <= dev->caps.num_ports; p++) {
4088 		mlx4_cleanup_port_info(&priv->port[p]);
4089 		mlx4_CLOSE_PORT(dev, p);
4090 	}
4091 
4092 	if (mlx4_is_master(dev))
4093 		mlx4_free_resource_tracker(dev,
4094 					   RES_TR_FREE_SLAVES_ONLY);
4095 
4096 	mlx4_cleanup_default_counters(dev);
4097 	if (!mlx4_is_slave(dev))
4098 		mlx4_cleanup_counters_table(dev);
4099 	mlx4_cleanup_qp_table(dev);
4100 	mlx4_cleanup_srq_table(dev);
4101 	mlx4_cleanup_cq_table(dev);
4102 	mlx4_cmd_use_polling(dev);
4103 	mlx4_cleanup_eq_table(dev);
4104 	mlx4_cleanup_mcg_table(dev);
4105 	mlx4_cleanup_mr_table(dev);
4106 	mlx4_cleanup_xrcd_table(dev);
4107 	mlx4_cleanup_pd_table(dev);
4108 
4109 	if (mlx4_is_master(dev))
4110 		mlx4_free_resource_tracker(dev,
4111 					   RES_TR_FREE_STRUCTS_ONLY);
4112 
4113 	iounmap(priv->kar);
4114 	mlx4_uar_free(dev, &priv->driver_uar);
4115 	mlx4_cleanup_uar_table(dev);
4116 	if (!mlx4_is_slave(dev))
4117 		mlx4_clear_steering(dev);
4118 	mlx4_free_eq_table(dev);
4119 	if (mlx4_is_master(dev))
4120 		mlx4_multi_func_cleanup(dev);
4121 	mlx4_close_hca(dev);
4122 	mlx4_close_fw(dev);
4123 	if (mlx4_is_slave(dev))
4124 		mlx4_multi_func_cleanup(dev);
4125 	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
4126 
4127 	if (dev->flags & MLX4_FLAG_MSI_X)
4128 		pci_disable_msix(pdev);
4129 
4130 	if (!mlx4_is_slave(dev))
4131 		mlx4_free_ownership(dev);
4132 
4133 	mlx4_slave_destroy_special_qp_cap(dev);
4134 	kfree(dev->dev_vfs);
4135 
4136 	mlx4_clean_dev(dev);
4137 	priv->pci_dev_data = pci_dev_data;
4138 	priv->removed = 1;
4139 }
4140 
4141 static void mlx4_remove_one(struct pci_dev *pdev)
4142 {
4143 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4144 	struct mlx4_dev  *dev  = persist->dev;
4145 	struct mlx4_priv *priv = mlx4_priv(dev);
4146 	struct devlink *devlink = priv_to_devlink(priv);
4147 	int active_vfs = 0;
4148 
4149 	devlink_unregister(devlink);
4150 
4151 	devl_lock(devlink);
4152 	if (mlx4_is_slave(dev))
4153 		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
4154 
4155 	mutex_lock(&persist->interface_state_mutex);
4156 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
4157 	mutex_unlock(&persist->interface_state_mutex);
4158 
	/* Disabling SR-IOV is not allowed while there are active VFs */
4160 	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
4161 		active_vfs = mlx4_how_many_lives_vf(dev);
4162 		if (active_vfs) {
4163 			pr_warn("Removing PF when there are active VF's !!\n");
4164 			pr_warn("Will not disable SR-IOV.\n");
4165 		}
4166 	}
4167 
	/* The device is marked for deletion; continue without the lock so
	 * that other tasks can terminate.
	 */
4171 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4172 		mlx4_unload_one(pdev);
4173 	else
4174 		mlx4_info(dev, "%s: interface is down\n", __func__);
4175 	mlx4_catas_end(dev);
4176 	mlx4_crdump_end(dev);
4177 	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
4178 		mlx4_warn(dev, "Disabling SR-IOV\n");
4179 		pci_disable_sriov(pdev);
4180 	}
4181 
4182 	pci_release_regions(pdev);
4183 	mlx4_pci_disable_device(dev);
4184 	devl_params_unregister(devlink, mlx4_devlink_params,
4185 			       ARRAY_SIZE(mlx4_devlink_params));
4186 	kfree(dev->persist);
4187 	devl_unlock(devlink);
4188 	devlink_free(devlink);
4189 }
4190 
4191 static int restore_current_port_types(struct mlx4_dev *dev,
4192 				      enum mlx4_port_type *types,
4193 				      enum mlx4_port_type *poss_types)
4194 {
4195 	struct mlx4_priv *priv = mlx4_priv(dev);
4196 	int err, i;
4197 
4198 	mlx4_stop_sense(dev);
4199 
4200 	mutex_lock(&priv->port_mutex);
4201 	for (i = 0; i < dev->caps.num_ports; i++)
4202 		dev->caps.possible_type[i + 1] = poss_types[i];
4203 	err = mlx4_change_port_types(dev, types);
4204 	mlx4_start_sense(dev);
4205 	mutex_unlock(&priv->port_mutex);
4206 
4207 	return err;
4208 }
4209 
4210 static void mlx4_restart_one_down(struct pci_dev *pdev)
4211 {
4212 	mlx4_unload_one(pdev);
4213 }
4214 
4215 static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
4216 			       struct devlink *devlink)
4217 {
4218 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4219 	struct mlx4_dev	 *dev  = persist->dev;
4220 	struct mlx4_priv *priv = mlx4_priv(dev);
4221 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4222 	int pci_dev_data, err, total_vfs;
4223 
4224 	pci_dev_data = priv->pci_dev_data;
4225 	total_vfs = dev->persist->num_vfs;
4226 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4227 
4228 	if (reload)
4229 		mlx4_devlink_param_load_driverinit_values(devlink);
4230 	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
4231 	if (err) {
4232 		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
4233 			 __func__, pci_name(pdev), err);
4234 		return err;
4235 	}
4236 
4237 	err = restore_current_port_types(dev, dev->persist->curr_port_type,
4238 					 dev->persist->curr_port_poss_type);
4239 	if (err)
4240 		mlx4_err(dev, "could not restore original port types (%d)\n",
4241 			 err);
4242 
4243 	return err;
4244 }
4245 
4246 int mlx4_restart_one(struct pci_dev *pdev)
4247 {
4248 	mlx4_restart_one_down(pdev);
4249 	return mlx4_restart_one_up(pdev, false, NULL);
4250 }
4251 
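/* PCI table helpers: MLX_SP marks devices that need forced port sensing,
 * MLX_VF marks virtual functions and MLX_GN is a generic entry with no
 * special driver_data flags.
 */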
4252 #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
4253 #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
4254 #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
4255 
4256 static const struct pci_device_id mlx4_pci_table[] = {
4257 #ifdef CONFIG_MLX4_CORE_GEN2
4258 	/* MT25408 "Hermon" */
4259 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR),	/* SDR */
4260 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR),	/* DDR */
4261 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR),	/* QDR */
4262 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
4263 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2),	/* QDR Gen2 */
4264 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN),	/* EN 10GigE */
4265 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2),  /* EN 10GigE Gen2 */
4266 	/* MT25458 ConnectX EN 10GBASE-T */
4267 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
4268 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2),	/* Gen2 */
4269 	/* MT26468 ConnectX EN 10GigE PCIe Gen2*/
4270 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
4271 	/* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
4272 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
4273 	/* MT26478 ConnectX2 40GigE PCIe Gen2 */
4274 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
4275 	/* MT25400 Family [ConnectX-2] */
4276 	MLX_VF(0x1002),					/* Virtual Function */
4277 #endif /* CONFIG_MLX4_CORE_GEN2 */
4278 	/* MT27500 Family [ConnectX-3] */
4279 	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
4280 	MLX_VF(0x1004),					/* Virtual Function */
4281 	MLX_GN(0x1005),					/* MT27510 Family */
4282 	MLX_GN(0x1006),					/* MT27511 Family */
4283 	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO),	/* MT27520 Family */
4284 	MLX_GN(0x1008),					/* MT27521 Family */
4285 	MLX_GN(0x1009),					/* MT27530 Family */
4286 	MLX_GN(0x100a),					/* MT27531 Family */
4287 	MLX_GN(0x100b),					/* MT27540 Family */
4288 	MLX_GN(0x100c),					/* MT27541 Family */
4289 	MLX_GN(0x100d),					/* MT27550 Family */
4290 	MLX_GN(0x100e),					/* MT27551 Family */
4291 	MLX_GN(0x100f),					/* MT27560 Family */
4292 	MLX_GN(0x1010),					/* MT27561 Family */
4293 
4294 	/*
4295 	 * See the mellanox_check_broken_intx_masking() quirk when
4296 	 * adding devices
4297 	 */
4298 
4299 	{ 0, }
4300 };
4301 
4302 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
4303 
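/* PCI AER recovery flow: error_detected() moves the device into its error
 * state and unloads it, slot_reset() re-enables the device after the link
 * reset, and resume() reloads the driver instance.
 */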
4304 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4305 					      pci_channel_state_t state)
4306 {
4307 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4308 	struct mlx4_dev *dev = persist->dev;
4309 	struct devlink *devlink;
4310 
4311 	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
4312 	mlx4_enter_error_state(persist);
4313 
4314 	devlink = priv_to_devlink(mlx4_priv(dev));
4315 	devl_lock(devlink);
4316 	mutex_lock(&persist->interface_state_mutex);
4317 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4318 		mlx4_unload_one(pdev);
4319 
4320 	mutex_unlock(&persist->interface_state_mutex);
4321 	devl_unlock(devlink);
4322 	if (state == pci_channel_io_perm_failure)
4323 		return PCI_ERS_RESULT_DISCONNECT;
4324 
4325 	mlx4_pci_disable_device(persist->dev);
4326 	return PCI_ERS_RESULT_NEED_RESET;
4327 }
4328 
4329 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4330 {
4331 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4332 	struct mlx4_dev	 *dev  = persist->dev;
4333 	int err;
4334 
4335 	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4336 	err = mlx4_pci_enable_device(dev);
4337 	if (err) {
4338 		mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
4339 		return PCI_ERS_RESULT_DISCONNECT;
4340 	}
4341 
4342 	pci_set_master(pdev);
4343 	pci_restore_state(pdev);
4344 	pci_save_state(pdev);
4345 	return PCI_ERS_RESULT_RECOVERED;
4346 }
4347 
4348 static void mlx4_pci_resume(struct pci_dev *pdev)
4349 {
4350 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4351 	struct mlx4_dev	 *dev  = persist->dev;
4352 	struct mlx4_priv *priv = mlx4_priv(dev);
4353 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4354 	struct devlink *devlink;
4355 	int total_vfs;
4356 	int err;
4357 
4358 	mlx4_err(dev, "%s was called\n", __func__);
4359 	total_vfs = dev->persist->num_vfs;
4360 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4361 
4362 	devlink = priv_to_devlink(priv);
4363 	devl_lock(devlink);
4364 	mutex_lock(&persist->interface_state_mutex);
4365 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4366 		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4367 				    priv, 1);
4368 		if (err) {
4369 			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4370 				 __func__,  err);
4371 			goto end;
4372 		}
4373 
4374 		err = restore_current_port_types(dev, dev->persist->
4375 						 curr_port_type, dev->persist->
4376 						 curr_port_poss_type);
4377 		if (err)
4378 			mlx4_err(dev, "could not restore original port types (%d)\n", err);
4379 	}
4380 end:
4381 	mutex_unlock(&persist->interface_state_mutex);
4382 	devl_unlock(devlink);
4383 }
4384 
4385 static void mlx4_shutdown(struct pci_dev *pdev)
4386 {
4387 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4388 	struct mlx4_dev *dev = persist->dev;
4389 	struct devlink *devlink;
4390 
4391 	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4392 	devlink = priv_to_devlink(mlx4_priv(dev));
4393 	devl_lock(devlink);
4394 	mutex_lock(&persist->interface_state_mutex);
4395 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4396 		mlx4_unload_one(pdev);
4397 	mutex_unlock(&persist->interface_state_mutex);
4398 	devl_unlock(devlink);
4399 	mlx4_pci_disable_device(dev);
4400 }
4401 
4402 static const struct pci_error_handlers mlx4_err_handler = {
4403 	.error_detected = mlx4_pci_err_detected,
4404 	.slot_reset     = mlx4_pci_slot_reset,
4405 	.resume		= mlx4_pci_resume,
4406 };
4407 
4408 static int __maybe_unused mlx4_suspend(struct device *dev_d)
4409 {
4410 	struct pci_dev *pdev = to_pci_dev(dev_d);
4411 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4412 	struct mlx4_dev	*dev = persist->dev;
4413 	struct devlink *devlink;
4414 
4415 	mlx4_err(dev, "suspend was called\n");
4416 	devlink = priv_to_devlink(mlx4_priv(dev));
4417 	devl_lock(devlink);
4418 	mutex_lock(&persist->interface_state_mutex);
4419 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4420 		mlx4_unload_one(pdev);
4421 	mutex_unlock(&persist->interface_state_mutex);
4422 	devl_unlock(devlink);
4423 
4424 	return 0;
4425 }
4426 
4427 static int __maybe_unused mlx4_resume(struct device *dev_d)
4428 {
4429 	struct pci_dev *pdev = to_pci_dev(dev_d);
4430 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4431 	struct mlx4_dev	*dev = persist->dev;
4432 	struct mlx4_priv *priv = mlx4_priv(dev);
4433 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4434 	struct devlink *devlink;
4435 	int total_vfs;
4436 	int ret = 0;
4437 
4438 	mlx4_err(dev, "resume was called\n");
4439 	total_vfs = dev->persist->num_vfs;
4440 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4441 
4442 	devlink = priv_to_devlink(priv);
4443 	devl_lock(devlink);
4444 	mutex_lock(&persist->interface_state_mutex);
4445 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4446 		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
4447 				    nvfs, priv, 1);
4448 		if (!ret) {
4449 			ret = restore_current_port_types(dev,
4450 					dev->persist->curr_port_type,
4451 					dev->persist->curr_port_poss_type);
4452 			if (ret)
4453 				mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);
4454 		}
4455 	}
4456 	mutex_unlock(&persist->interface_state_mutex);
4457 	devl_unlock(devlink);
4458 
4459 	return ret;
4460 }
4461 
4462 static SIMPLE_DEV_PM_OPS(mlx4_pm_ops, mlx4_suspend, mlx4_resume);
4463 
4464 static struct pci_driver mlx4_driver = {
4465 	.name		= DRV_NAME,
4466 	.id_table	= mlx4_pci_table,
4467 	.probe		= mlx4_init_one,
4468 	.shutdown	= mlx4_shutdown,
4469 	.remove		= mlx4_remove_one,
4470 	.driver.pm	= &mlx4_pm_ops,
4471 	.err_handler    = &mlx4_err_handler,
4472 };
4473 
4474 static int __init mlx4_verify_params(void)
4475 {
4476 	if (msi_x < 0) {
4477 		pr_warn("mlx4_core: bad msi_x: %d\n", msi_x);
4478 		return -1;
4479 	}
4480 
4481 	if ((log_num_mac < 0) || (log_num_mac > 7)) {
4482 		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
4483 		return -1;
4484 	}
4485 
4486 	if (log_num_vlan != 0)
4487 		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
4488 			MLX4_LOG_NUM_VLANS);
4489 
4490 	if (use_prio != 0)
4491 		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
4492 
4493 	if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
4494 		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
4495 			log_mtts_per_seg);
4496 		return -1;
4497 	}
4498 
4499 	/* Check if module param for ports type has legal combination */
	if (!port_type_array[0] && port_type_array[1]) {
4501 		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
4502 		port_type_array[0] = true;
4503 	}
4504 
4505 	if (mlx4_log_num_mgm_entry_size < -7 ||
4506 	    (mlx4_log_num_mgm_entry_size > 0 &&
4507 	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
4508 	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
4509 		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
4510 			mlx4_log_num_mgm_entry_size,
4511 			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
4512 			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
4513 		return -1;
4514 	}
4515 
4516 	return 0;
4517 }
4518 
4519 static int __init mlx4_init(void)
4520 {
4521 	int ret;
4522 
4523 	if (mlx4_verify_params())
4524 		return -EINVAL;
4525 
4527 	mlx4_wq = create_singlethread_workqueue("mlx4");
4528 	if (!mlx4_wq)
4529 		return -ENOMEM;
4530 
4531 	ret = pci_register_driver(&mlx4_driver);
4532 	if (ret < 0)
4533 		destroy_workqueue(mlx4_wq);
4534 	return ret < 0 ? ret : 0;
4535 }
4536 
4537 static void __exit mlx4_cleanup(void)
4538 {
4539 	pci_unregister_driver(&mlx4_driver);
4540 	destroy_workqueue(mlx4_wq);
4541 }
4542 
4543 module_init(mlx4_init);
4544 module_exit(mlx4_cleanup);
4545