1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/io-mapping.h>
43 #include <linux/delay.h>
44 #include <linux/kmod.h>
45 #include <linux/etherdevice.h>
46 #include <net/devlink.h>
47 
48 #include <linux/mlx4/device.h>
49 #include <linux/mlx4/doorbell.h>
50 
51 #include "mlx4.h"
52 #include "fw.h"
53 #include "icm.h"
54 
55 MODULE_AUTHOR("Roland Dreier");
56 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
57 MODULE_LICENSE("Dual BSD/GPL");
58 MODULE_VERSION(DRV_VERSION);
59 
60 struct workqueue_struct *mlx4_wq;
61 
62 #ifdef CONFIG_MLX4_DEBUG
63 
64 int mlx4_debug_level = 0;
65 module_param_named(debug_level, mlx4_debug_level, int, 0644);
66 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
67 
68 #endif /* CONFIG_MLX4_DEBUG */
69 
70 #ifdef CONFIG_PCI_MSI
71 
72 static int msi_x = 1;
73 module_param(msi_x, int, 0444);
74 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
75 
76 #else /* CONFIG_PCI_MSI */
77 
78 #define msi_x (0)
79 
80 #endif /* CONFIG_PCI_MSI */
81 
82 static uint8_t num_vfs[3] = {0, 0, 0};
83 static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
85 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
86 			  "num_vfs=port1,port2,port1+2");
87 
88 static uint8_t probe_vf[3] = {0, 0, 0};
89 static int probe_vfs_argc;
90 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of VFs to probe by the PF driver (num_vfs > 0)\n"
92 			   "probe_vf=port1,port2,port1+2");
93 
94 static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
95 module_param_named(log_num_mgm_entry_size,
96 			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the num"
					 " of QPs per MCG, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
103 
104 static bool enable_64b_cqe_eqe = true;
105 module_param(enable_64b_cqe_eqe, bool, 0444);
106 MODULE_PARM_DESC(enable_64b_cqe_eqe,
107 		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
108 
109 static bool enable_4k_uar;
110 module_param(enable_4k_uar, bool, 0444);
111 MODULE_PARM_DESC(enable_4k_uar,
112 		 "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)");
113 
114 #define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
115 					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
116 					 MLX4_FUNC_CAP_DMFS_A0_STATIC)
117 
118 #define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)
119 
120 static char mlx4_version[] =
121 	DRV_NAME ": Mellanox ConnectX core driver v"
122 	DRV_VERSION "\n";
123 
124 static struct mlx4_profile default_profile = {
125 	.num_qp		= 1 << 18,
126 	.num_srq	= 1 << 16,
127 	.rdmarc_per_qp	= 1 << 4,
128 	.num_cq		= 1 << 16,
129 	.num_mcg	= 1 << 13,
130 	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really the number of MTT segments */
132 };
133 
134 static struct mlx4_profile low_mem_profile = {
135 	.num_qp		= 1 << 17,
136 	.num_srq	= 1 << 6,
137 	.rdmarc_per_qp	= 1 << 4,
138 	.num_cq		= 1 << 8,
139 	.num_mcg	= 1 << 8,
140 	.num_mpt	= 1 << 9,
141 	.num_mtt	= 1 << 7,
142 };
143 
144 static int log_num_mac = 7;
145 module_param_named(log_num_mac, log_num_mac, int, 0444);
146 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
147 
148 static int log_num_vlan;
149 module_param_named(log_num_vlan, log_num_vlan, int, 0444);
150 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
151 /* Log2 max number of VLANs per ETH port (0-7) */
152 #define MLX4_LOG_NUM_VLANS 7
153 #define MLX4_MIN_LOG_NUM_VLANS 0
154 #define MLX4_MIN_LOG_NUM_MAC 1
155 
156 static bool use_prio;
157 module_param_named(use_prio, use_prio, bool, 0444);
158 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
159 
160 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
161 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
162 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
163 
164 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
165 static int arr_argc = 2;
166 module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				"1 for IB, 2 for Ethernet");
169 
170 struct mlx4_port_config {
171 	struct list_head list;
172 	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
173 	struct pci_dev *pdev;
174 };
175 
176 static atomic_t pf_loading = ATOMIC_INIT(0);
177 
178 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
179 					      struct mlx4_dev_cap *dev_cap)
180 {
	/* reserved_uars is expressed in units of the system page size.
	 * Therefore an adjustment is needed when the UAR page size is
	 * smaller than the system page size.
	 */
185 	dev->caps.reserved_uars	=
186 		max_t(int,
187 		      mlx4_get_num_reserved_uar(dev),
188 		      dev_cap->reserved_uars /
189 			(1 << (PAGE_SHIFT - dev->uar_page_shift)));
190 }
191 
192 int mlx4_check_port_params(struct mlx4_dev *dev,
193 			   enum mlx4_port_type *port_type)
194 {
195 	int i;
196 
197 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
198 		for (i = 0; i < dev->caps.num_ports - 1; i++) {
199 			if (port_type[i] != port_type[i + 1]) {
200 				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
201 				return -EINVAL;
202 			}
203 		}
204 	}
205 
206 	for (i = 0; i < dev->caps.num_ports; i++) {
207 		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
208 			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
209 				 i + 1);
210 			return -EINVAL;
211 		}
212 	}
213 	return 0;
214 }
215 
216 static void mlx4_set_port_mask(struct mlx4_dev *dev)
217 {
218 	int i;
219 
220 	for (i = 1; i <= dev->caps.num_ports; ++i)
221 		dev->caps.port_mask[i] = dev->caps.port_type[i];
222 }
223 
224 enum {
225 	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
226 };
227 
228 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
229 {
230 	int err = 0;
231 	struct mlx4_func func;
232 
233 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
234 		err = mlx4_QUERY_FUNC(dev, &func, 0);
235 		if (err) {
236 			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
237 			return err;
238 		}
239 		dev_cap->max_eqs = func.max_eq;
240 		dev_cap->reserved_eqs = func.rsvd_eqs;
241 		dev_cap->reserved_uars = func.rsvd_uars;
242 		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
243 	}
244 	return err;
245 }
246 
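/*
 * CQE/EQE stride (a best-effort reading of the logic below): the HW
 * writes 64-byte entries of which only 32 bytes carry data, so that
 * each entry write stays within a single cacheline on CPUs with 128-
 * or 256-byte cachelines.
 */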
247 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
248 {
249 	struct mlx4_caps *dev_cap = &dev->caps;
250 
	/* FW does not support the feature, or it was disabled by the user */
252 	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
253 	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
254 		return;
255 
	/* Must have 64B CQE/EQE enabled by FW to use the bigger stride.
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs.
	 */
259 	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
260 	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
261 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
262 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
263 		return;
264 	}
265 
266 	if (cache_line_size() == 128 || cache_line_size() == 256) {
267 		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
268 		/* Changing the real data inside CQE size to 32B */
269 		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
270 		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
271 
272 		if (mlx4_is_master(dev))
273 			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
274 	} else {
275 		if (cache_line_size() != 32  && cache_line_size() != 64)
276 			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
277 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
278 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
279 	}
280 }
281 
282 static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
283 			  struct mlx4_port_cap *port_cap)
284 {
285 	dev->caps.vl_cap[port]	    = port_cap->max_vl;
286 	dev->caps.ib_mtu_cap[port]	    = port_cap->ib_mtu;
287 	dev->phys_caps.gid_phys_table_len[port]  = port_cap->max_gids;
288 	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
289 	/* set gid and pkey table operating lengths by default
290 	 * to non-sriov values
291 	 */
292 	dev->caps.gid_table_len[port]  = port_cap->max_gids;
293 	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
294 	dev->caps.port_width_cap[port] = port_cap->max_port_width;
295 	dev->caps.eth_mtu_cap[port]    = port_cap->eth_mtu;
296 	dev->caps.max_tc_eth	       = port_cap->max_tc_eth;
297 	dev->caps.def_mac[port]        = port_cap->def_mac;
298 	dev->caps.supported_type[port] = port_cap->supported_port_types;
299 	dev->caps.suggested_type[port] = port_cap->suggested_type;
300 	dev->caps.default_sense[port] = port_cap->default_sense;
301 	dev->caps.trans_type[port]	    = port_cap->trans_type;
302 	dev->caps.vendor_oui[port]     = port_cap->vendor_oui;
303 	dev->caps.wavelength[port]     = port_cap->wavelength;
304 	dev->caps.trans_code[port]     = port_cap->trans_code;
305 
306 	return 0;
307 }
308 
309 static int mlx4_dev_port(struct mlx4_dev *dev, int port,
310 			 struct mlx4_port_cap *port_cap)
311 {
312 	int err = 0;
313 
314 	err = mlx4_QUERY_PORT(dev, port, port_cap);
315 
316 	if (err)
317 		mlx4_err(dev, "QUERY_PORT command failed.\n");
318 
319 	return err;
320 }
321 
322 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
323 {
324 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
325 		return;
326 
327 	if (mlx4_is_mfunc(dev)) {
328 		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
329 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
330 		return;
331 	}
332 
333 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS\n");
336 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
337 		return;
338 	}
339 }
340 
341 #define MLX4_A0_STEERING_TABLE_SIZE	256
342 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
343 {
344 	int err;
345 	int i;
346 
347 	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
348 	if (err) {
349 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
350 		return err;
351 	}
352 	mlx4_dev_cap_dump(dev, dev_cap);
353 
354 	if (dev_cap->min_page_sz > PAGE_SIZE) {
355 		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
356 			 dev_cap->min_page_sz, PAGE_SIZE);
357 		return -ENODEV;
358 	}
359 	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
360 		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
361 			 dev_cap->num_ports, MLX4_MAX_PORTS);
362 		return -ENODEV;
363 	}
364 
365 	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
366 		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
367 			 dev_cap->uar_size,
368 			 (unsigned long long)
369 			 pci_resource_len(dev->persist->pdev, 2));
370 		return -ENODEV;
371 	}
372 
373 	dev->caps.num_ports	     = dev_cap->num_ports;
374 	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
375 	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
376 				      dev->caps.num_sys_eqs :
377 				      MLX4_MAX_EQ_NUM;
378 	for (i = 1; i <= dev->caps.num_ports; ++i) {
379 		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
380 		if (err) {
381 			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
382 			return err;
383 		}
384 	}
385 
386 	dev->caps.uar_page_size	     = PAGE_SIZE;
387 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
388 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
389 	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
390 	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
391 	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
392 	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
393 	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
394 	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
395 	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
396 	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
397 	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
398 	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
399 	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
400 	/*
401 	 * Subtract 1 from the limit because we need to allocate a
402 	 * spare CQE so the HCA HW can tell the difference between an
403 	 * empty CQ and a full CQ.
404 	 */
405 	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
406 	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
407 	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
408 	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
409 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
410 
411 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
412 	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
413 					dev_cap->reserved_xrcds : 0;
414 	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
415 					dev_cap->max_xrcds : 0;
416 	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;
417 
418 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
419 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
420 	dev->caps.flags		     = dev_cap->flags;
421 	dev->caps.flags2	     = dev_cap->flags2;
422 	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
423 	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
424 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
425 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
426 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
427 	dev->caps.wol_port[1]          = dev_cap->wol_port[1];
428 	dev->caps.wol_port[2]          = dev_cap->wol_port[2];
429 
430 	/* Save uar page shift */
431 	if (!mlx4_is_slave(dev)) {
432 		/* Virtual PCI function needs to determine UAR page size from
433 		 * firmware. Only master PCI function can set the uar page size
434 		 */
435 		if (enable_4k_uar)
436 			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
437 		else
438 			dev->uar_page_shift = PAGE_SHIFT;
439 
440 		mlx4_set_num_reserved_uars(dev, dev_cap);
441 	}
442 
443 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
444 		struct mlx4_init_hca_param hca_param;
445 
446 		memset(&hca_param, 0, sizeof(hca_param));
447 		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off the PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies that the phv bit was reported correctly in the
		 * WQE. To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared; otherwise QinQ packets
		 * will be dropped by the HW.
		 */
454 		if (err || hca_param.phv_check_en)
455 			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
456 	}
457 
458 	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
459 	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
460 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
461 	/* Don't do sense port on multifunction devices (for now at least) */
462 	if (mlx4_is_mfunc(dev))
463 		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
464 
465 	if (mlx4_low_memory_profile()) {
466 		dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
467 		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
468 	} else {
469 		dev->caps.log_num_macs  = log_num_mac;
470 		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
471 	}
472 
473 	for (i = 1; i <= dev->caps.num_ports; ++i) {
474 		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
475 		if (dev->caps.supported_type[i]) {
476 			/* if only ETH is supported - assign ETH */
477 			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
478 				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
479 			/* if only IB is supported, assign IB */
480 			else if (dev->caps.supported_type[i] ==
481 				 MLX4_PORT_TYPE_IB)
482 				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
483 			else {
484 				/* if IB and ETH are supported, we set the port
485 				 * type according to user selection of port type;
486 				 * if user selected none, take the FW hint */
487 				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
488 					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
489 						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
490 				else
491 					dev->caps.port_type[i] = port_type_array[i - 1];
492 			}
493 		}
494 		/*
495 		 * Link sensing is allowed on the port if 3 conditions are true:
496 		 * 1. Both protocols are supported on the port.
497 		 * 2. Different types are supported on the port
498 		 * 3. FW declared that it supports link sensing
499 		 */
500 		mlx4_priv(dev)->sense.sense_allowed[i] =
501 			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
502 			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
503 			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
504 
505 		/*
506 		 * If "default_sense" bit is set, we move the port to "AUTO" mode
507 		 * and perform sense_port FW command to try and set the correct
508 		 * port type from beginning
509 		 */
510 		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
511 			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
512 			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
513 			mlx4_SENSE_PORT(dev, i, &sensed_port);
514 			if (sensed_port != MLX4_PORT_TYPE_NONE)
515 				dev->caps.port_type[i] = sensed_port;
516 		} else {
517 			dev->caps.possible_type[i] = dev->caps.port_type[i];
518 		}
519 
520 		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
521 			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
522 			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
523 				  i, 1 << dev->caps.log_num_macs);
524 		}
525 		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
526 			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
527 			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
528 				  i, 1 << dev->caps.log_num_vlans);
529 		}
530 	}
531 
532 	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
533 	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
534 	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
535 		mlx4_warn(dev,
536 			  "Granular QoS per VF not supported with IB/Eth configuration\n");
537 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
538 	}
539 
540 	dev->caps.max_counters = dev_cap->max_counters;
541 
542 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
543 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
544 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
545 		(1 << dev->caps.log_num_macs) *
546 		(1 << dev->caps.log_num_vlans) *
547 		dev->caps.num_ports;
548 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
549 
550 	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
551 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
552 		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
553 	else
554 		dev->caps.dmfs_high_rate_qpn_base =
555 			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
556 
557 	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
558 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
559 		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
560 		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
561 		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
562 	} else {
563 		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
564 		dev->caps.dmfs_high_rate_qpn_base =
565 			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
566 		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
567 	}
568 
569 	dev->caps.rl_caps = dev_cap->rl_caps;
570 
571 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
572 		dev->caps.dmfs_high_rate_qpn_range;
573 
574 	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
575 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
576 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
577 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
578 
579 	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
580 
581 	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
582 		if (dev_cap->flags &
583 		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
584 			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
585 			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
586 			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
587 		}
588 
589 		if (dev_cap->flags2 &
590 		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
591 		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
592 			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
593 			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
594 			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
595 		}
596 	}
597 
598 	if ((dev->caps.flags &
599 	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
600 	    mlx4_is_master(dev))
601 		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
602 
603 	if (!mlx4_is_slave(dev)) {
604 		mlx4_enable_cqe_eqe_stride(dev);
605 		dev->caps.alloc_res_qp_mask =
606 			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
607 			MLX4_RESERVE_A0_QP;
608 
609 		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
610 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
611 			mlx4_warn(dev, "Old device ETS support detected\n");
612 			mlx4_warn(dev, "Consider upgrading device FW.\n");
613 			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
614 		}
615 
616 	} else {
617 		dev->caps.alloc_res_qp_mask = 0;
618 	}
619 
620 	mlx4_enable_ignore_fcs(dev);
621 
622 	return 0;
623 }
624 
625 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
626 				       enum pci_bus_speed *speed,
627 				       enum pcie_link_width *width)
628 {
629 	u32 lnkcap1, lnkcap2;
630 	int err1, err2;
631 
632 #define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */
633 
634 	*speed = PCI_SPEED_UNKNOWN;
635 	*width = PCIE_LNK_WIDTH_UNKNOWN;
636 
637 	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
638 					  &lnkcap1);
639 	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
640 					  &lnkcap2);
641 	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
642 		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
643 			*speed = PCIE_SPEED_8_0GT;
644 		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
645 			*speed = PCIE_SPEED_5_0GT;
646 		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
647 			*speed = PCIE_SPEED_2_5GT;
648 	}
649 	if (!err1) {
650 		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
651 		if (!lnkcap2) { /* pre-r3.0 */
652 			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
653 				*speed = PCIE_SPEED_5_0GT;
654 			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
655 				*speed = PCIE_SPEED_2_5GT;
656 		}
657 	}
658 
659 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
660 		return err1 ? err1 :
661 			err2 ? err2 : -EINVAL;
662 	}
663 	return 0;
664 }
665 
666 static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
667 {
668 	enum pcie_link_width width, width_cap;
669 	enum pci_bus_speed speed, speed_cap;
670 	int err;
671 
672 #define PCIE_SPEED_STR(speed) \
673 	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
674 	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
675 	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
676 	 "Unknown")
677 
678 	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
679 	if (err) {
680 		mlx4_warn(dev,
681 			  "Unable to determine PCIe device BW capabilities\n");
682 		return;
683 	}
684 
685 	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
686 	if (err || speed == PCI_SPEED_UNKNOWN ||
687 	    width == PCIE_LNK_WIDTH_UNKNOWN) {
688 		mlx4_warn(dev,
689 			  "Unable to determine PCI device chain minimum BW\n");
690 		return;
691 	}
692 
693 	if (width != width_cap || speed != speed_cap)
694 		mlx4_warn(dev,
695 			  "PCIe BW is different than device's capability\n");
696 
697 	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
698 		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
699 	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
700 		  width, width_cap);
701 	return;
702 }
703 
/* The function checks for live VFs and returns their count */
705 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
706 {
707 	struct mlx4_priv *priv = mlx4_priv(dev);
708 	struct mlx4_slave_state *s_state;
709 	int i;
710 	int ret = 0;
711 
	for (i = 1 /* the PPF is slave 0 */; i < dev->num_slaves; ++i) {
713 		s_state = &priv->mfunc.master.slave_state[i];
714 		if (s_state->active && s_state->last_cmd !=
715 		    MLX4_COMM_CMD_RESET) {
716 			mlx4_warn(dev, "%s: slave: %d is still active\n",
717 				  __func__, i);
718 			ret++;
719 		}
720 	}
721 	return ret;
722 }
723 
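/*
 * Map a proxy/tunnel special QPN to its paravirtualized qkey: the
 * reserved qkey space starts at MLX4_RESERVED_QKEY_BASE, and the offset
 * is the QPN's index within the proxy or tunnel SQPN range.
 */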
724 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
725 {
726 	u32 qk = MLX4_RESERVED_QKEY_BASE;
727 
728 	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
729 	    qpn < dev->phys_caps.base_proxy_sqpn)
730 		return -EINVAL;
731 
732 	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
733 		/* tunnel qp */
734 		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
735 	else
736 		qk += qpn - dev->phys_caps.base_proxy_sqpn;
737 	*qkey = qk;
738 	return 0;
739 }
740 EXPORT_SYMBOL(mlx4_get_parav_qkey);
741 
742 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
743 {
744 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
745 
746 	if (!mlx4_is_master(dev))
747 		return;
748 
749 	priv->virt2phys_pkey[slave][port - 1][i] = val;
750 }
751 EXPORT_SYMBOL(mlx4_sync_pkey_table);
752 
753 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
754 {
755 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
756 
757 	if (!mlx4_is_master(dev))
758 		return;
759 
760 	priv->slave_node_guids[slave] = guid;
761 }
762 EXPORT_SYMBOL(mlx4_put_slave_node_guid);
763 
764 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
765 {
766 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
767 
768 	if (!mlx4_is_master(dev))
769 		return 0;
770 
771 	return priv->slave_node_guids[slave];
772 }
773 EXPORT_SYMBOL(mlx4_get_slave_node_guid);
774 
775 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
776 {
777 	struct mlx4_priv *priv = mlx4_priv(dev);
778 	struct mlx4_slave_state *s_slave;
779 
780 	if (!mlx4_is_master(dev))
781 		return 0;
782 
783 	s_slave = &priv->mfunc.master.slave_state[slave];
784 	return !!s_slave->active;
785 }
786 EXPORT_SYMBOL(mlx4_is_slave_active);
787 
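/*
 * If an Ethernet steering rule consists solely of a multicast or
 * broadcast DMAC spec (no further specs follow the L2 header), move
 * the rule to the MLX4_DOMAIN_NIC priority.
 */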
788 void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
789 				       struct _rule_hw *eth_header)
790 {
791 	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
792 	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
793 		struct mlx4_net_trans_rule_hw_eth *eth =
794 			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
795 		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
796 		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
797 			next_rule->rsvd == 0;
798 
799 		if (last_rule)
800 			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
801 	}
802 }
803 EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
804 
805 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
806 				       struct mlx4_dev_cap *dev_cap,
807 				       struct mlx4_init_hca_param *hca_param)
808 {
809 	dev->caps.steering_mode = hca_param->steering_mode;
810 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
811 		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
812 		dev->caps.fs_log_max_ucast_qp_range_size =
813 			dev_cap->fs_log_max_ucast_qp_range_size;
814 	} else
815 		dev->caps.num_qp_per_mgm =
816 			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
817 
818 	mlx4_dbg(dev, "Steering mode is: %s\n",
819 		 mlx4_steering_mode_str(dev->caps.steering_mode));
820 }
821 
822 static int mlx4_slave_cap(struct mlx4_dev *dev)
823 {
824 	int			   err;
825 	u32			   page_size;
826 	struct mlx4_dev_cap	   dev_cap;
827 	struct mlx4_func_cap	   func_cap;
828 	struct mlx4_init_hca_param hca_param;
829 	u8			   i;
830 
831 	memset(&hca_param, 0, sizeof(hca_param));
832 	err = mlx4_QUERY_HCA(dev, &hca_param);
833 	if (err) {
834 		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
835 		return err;
836 	}
837 
	/* Fail if the HCA has an unknown global capability.
	 * At this time, global_caps should always be zero.
	 */
841 	if (hca_param.global_caps) {
842 		mlx4_err(dev, "Unknown hca global capabilities\n");
843 		return -EINVAL;
844 	}
845 
846 	dev->caps.hca_core_clock = hca_param.hca_core_clock;
847 
848 	memset(&dev_cap, 0, sizeof(dev_cap));
849 	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
850 	err = mlx4_dev_cap(dev, &dev_cap);
851 	if (err) {
852 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
853 		return err;
854 	}
855 
856 	err = mlx4_QUERY_FW(dev);
857 	if (err)
858 		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
859 
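	/* page_size_cap was stored as ~(min_page_sz - 1) in mlx4_dev_cap(),
	 * so the minimum page size is recovered by two's complement.
	 */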
860 	page_size = ~dev->caps.page_size_cap + 1;
861 	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
862 	if (page_size > PAGE_SIZE) {
863 		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
864 			 page_size, PAGE_SIZE);
865 		return -ENODEV;
866 	}
867 
	/* Set uar_page_shift for VF: FW reports the UAR page size as log2(size) - 12 */
869 	dev->uar_page_shift = hca_param.uar_page_sz + 12;
870 
871 	/* Make sure the master uar page size is valid */
872 	if (dev->uar_page_shift > PAGE_SHIFT) {
873 		mlx4_err(dev,
874 			 "Invalid configuration: uar page size is larger than system page size\n");
875 		return  -ENODEV;
876 	}
877 
878 	/* Set reserved_uars based on the uar_page_shift */
879 	mlx4_set_num_reserved_uars(dev, &dev_cap);
880 
	/* Although the uar page size in FW differs from the system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still work under the assumption that uar page size == system page size
	 */
885 	dev->caps.uar_page_size = PAGE_SIZE;
886 
887 	memset(&func_cap, 0, sizeof(func_cap));
888 	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
889 	if (err) {
890 		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
891 			 err);
892 		return err;
893 	}
894 
895 	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
896 	    PF_CONTEXT_BEHAVIOUR_MASK) {
897 		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
898 			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
899 		return -EINVAL;
900 	}
901 
902 	dev->caps.num_ports		= func_cap.num_ports;
903 	dev->quotas.qp			= func_cap.qp_quota;
904 	dev->quotas.srq			= func_cap.srq_quota;
905 	dev->quotas.cq			= func_cap.cq_quota;
906 	dev->quotas.mpt			= func_cap.mpt_quota;
907 	dev->quotas.mtt			= func_cap.mtt_quota;
908 	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
909 	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
910 	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
911 	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
912 	dev->caps.num_eqs		= func_cap.max_eq;
913 	dev->caps.reserved_eqs		= func_cap.reserved_eq;
914 	dev->caps.reserved_lkey		= func_cap.reserved_lkey;
915 	dev->caps.num_pds               = MLX4_NUM_PDS;
916 	dev->caps.num_mgms              = 0;
917 	dev->caps.num_amgms             = 0;
918 
919 	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
920 		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
921 			 dev->caps.num_ports, MLX4_MAX_PORTS);
922 		return -ENODEV;
923 	}
924 
925 	mlx4_replace_zero_macs(dev);
926 
927 	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
932 
933 	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
934 	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
935 	    !dev->caps.qp0_qkey) {
936 		err = -ENOMEM;
937 		goto err_mem;
938 	}
939 
940 	for (i = 1; i <= dev->caps.num_ports; ++i) {
941 		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
942 		if (err) {
943 			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
944 				 i, err);
945 			goto err_mem;
946 		}
947 		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
948 		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
949 		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
950 		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
951 		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
952 		dev->caps.port_mask[i] = dev->caps.port_type[i];
953 		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
954 		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
955 						      &dev->caps.gid_table_len[i],
956 						      &dev->caps.pkey_table_len[i]);
957 		if (err)
958 			goto err_mem;
959 	}
960 
961 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
962 				       dev->caps.reserved_uars) >
963 				       pci_resource_len(dev->persist->pdev,
964 							2)) {
965 		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
966 			 dev->caps.uar_page_size * dev->caps.num_uars,
967 			 (unsigned long long)
968 			 pci_resource_len(dev->persist->pdev, 2));
969 		err = -ENOMEM;
970 		goto err_mem;
971 	}
972 
973 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
974 		dev->caps.eqe_size   = 64;
975 		dev->caps.eqe_factor = 1;
976 	} else {
977 		dev->caps.eqe_size   = 32;
978 		dev->caps.eqe_factor = 0;
979 	}
980 
981 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
982 		dev->caps.cqe_size   = 64;
983 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
984 	} else {
985 		dev->caps.cqe_size   = 32;
986 	}
987 
988 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
989 		dev->caps.eqe_size = hca_param.eqe_size;
990 		dev->caps.eqe_factor = 0;
991 	}
992 
993 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
994 		dev->caps.cqe_size = hca_param.cqe_size;
		/* Userspace still needs to know when the CQE is larger than 32B */
996 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
997 	}
998 
999 	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1000 	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
1001 
1002 	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
1003 	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
1004 		 hca_param.rss_ip_frags ? "on" : "off");
1005 
1006 	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
1007 	    dev->caps.bf_reg_size)
1008 		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
1009 
1010 	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
1011 		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
1012 
1013 	return 0;
1014 
1015 err_mem:
1016 	kfree(dev->caps.qp0_qkey);
1017 	kfree(dev->caps.qp0_tunnel);
1018 	kfree(dev->caps.qp0_proxy);
1019 	kfree(dev->caps.qp1_tunnel);
1020 	kfree(dev->caps.qp1_proxy);
1021 	dev->caps.qp0_qkey = NULL;
1022 	dev->caps.qp0_tunnel = NULL;
1023 	dev->caps.qp0_proxy = NULL;
1024 	dev->caps.qp1_tunnel = NULL;
1025 	dev->caps.qp1_proxy = NULL;
1026 
1027 	return err;
1028 }
1029 
1030 static void mlx4_request_modules(struct mlx4_dev *dev)
1031 {
1032 	int port;
1033 	int has_ib_port = false;
1034 	int has_eth_port = false;
1035 #define EN_DRV_NAME	"mlx4_en"
1036 #define IB_DRV_NAME	"mlx4_ib"
1037 
1038 	for (port = 1; port <= dev->caps.num_ports; port++) {
1039 		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
1040 			has_ib_port = true;
1041 		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
1042 			has_eth_port = true;
1043 	}
1044 
1045 	if (has_eth_port)
1046 		request_module_nowait(EN_DRV_NAME);
1047 	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
1048 		request_module_nowait(IB_DRV_NAME);
1049 }
1050 
1051 /*
1052  * Change the port configuration of the device.
1053  * Every user of this function must hold the port mutex.
1054  */
1055 int mlx4_change_port_types(struct mlx4_dev *dev,
1056 			   enum mlx4_port_type *port_types)
1057 {
1058 	int err = 0;
1059 	int change = 0;
1060 	int port;
1061 
1062 	for (port = 0; port <  dev->caps.num_ports; port++) {
1063 		/* Change the port type only if the new type is different
1064 		 * from the current, and not set to Auto */
1065 		if (port_types[port] != dev->caps.port_type[port + 1])
1066 			change = 1;
1067 	}
1068 	if (change) {
1069 		mlx4_unregister_device(dev);
1070 		for (port = 1; port <= dev->caps.num_ports; port++) {
1071 			mlx4_CLOSE_PORT(dev, port);
1072 			dev->caps.port_type[port] = port_types[port - 1];
1073 			err = mlx4_SET_PORT(dev, port, -1);
1074 			if (err) {
1075 				mlx4_err(dev, "Failed to set port %d, aborting\n",
1076 					 port);
1077 				goto out;
1078 			}
1079 		}
1080 		mlx4_set_port_mask(dev);
1081 		err = mlx4_register_device(dev);
1082 		if (err) {
1083 			mlx4_err(dev, "Failed to register device\n");
1084 			goto out;
1085 		}
1086 		mlx4_request_modules(dev);
1087 	}
1088 
1089 out:
1090 	return err;
1091 }
1092 
1093 static ssize_t show_port_type(struct device *dev,
1094 			      struct device_attribute *attr,
1095 			      char *buf)
1096 {
1097 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1098 						   port_attr);
1099 	struct mlx4_dev *mdev = info->dev;
1100 	char type[8];
1101 
1102 	sprintf(type, "%s",
1103 		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
1104 		"ib" : "eth");
1105 	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
1106 		sprintf(buf, "auto (%s)\n", type);
1107 	else
1108 		sprintf(buf, "%s\n", type);
1109 
1110 	return strlen(buf);
1111 }
1112 
1113 static int __set_port_type(struct mlx4_port_info *info,
1114 			   enum mlx4_port_type port_type)
1115 {
1116 	struct mlx4_dev *mdev = info->dev;
1117 	struct mlx4_priv *priv = mlx4_priv(mdev);
1118 	enum mlx4_port_type types[MLX4_MAX_PORTS];
1119 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
1120 	int i;
1121 	int err = 0;
1122 
1123 	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
1124 		mlx4_err(mdev,
1125 			 "Requested port type for port %d is not supported on this HCA\n",
1126 			 info->port);
1127 		err = -EINVAL;
1128 		goto err_sup;
1129 	}
1130 
1131 	mlx4_stop_sense(mdev);
1132 	mutex_lock(&priv->port_mutex);
1133 	info->tmp_type = port_type;
1134 
1135 	/* Possible type is always the one that was delivered */
1136 	mdev->caps.possible_type[info->port] = info->tmp_type;
1137 
1138 	for (i = 0; i < mdev->caps.num_ports; i++) {
1139 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
1140 					mdev->caps.possible_type[i+1];
1141 		if (types[i] == MLX4_PORT_TYPE_AUTO)
1142 			types[i] = mdev->caps.port_type[i+1];
1143 	}
1144 
1145 	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
1146 	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
1147 		for (i = 1; i <= mdev->caps.num_ports; i++) {
1148 			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
1149 				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
1150 				err = -EINVAL;
1151 			}
1152 		}
1153 	}
1154 	if (err) {
1155 		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
1156 		goto out;
1157 	}
1158 
1159 	mlx4_do_sense_ports(mdev, new_types, types);
1160 
1161 	err = mlx4_check_port_params(mdev, new_types);
1162 	if (err)
1163 		goto out;
1164 
1165 	/* We are about to apply the changes after the configuration
1166 	 * was verified, no need to remember the temporary types
1167 	 * any more */
1168 	for (i = 0; i < mdev->caps.num_ports; i++)
1169 		priv->port[i + 1].tmp_type = 0;
1170 
1171 	err = mlx4_change_port_types(mdev, new_types);
1172 
1173 out:
1174 	mlx4_start_sense(mdev);
1175 	mutex_unlock(&priv->port_mutex);
1176 err_sup:
1177 	return err;
1178 }
1179 
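/* Sysfs "store" handler for the per-port type attribute (assuming the
 * mlx4_portX attribute created elsewhere in this file); writing "ib",
 * "eth" or "auto" selects the requested port type.
 */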
1180 static ssize_t set_port_type(struct device *dev,
1181 			     struct device_attribute *attr,
1182 			     const char *buf, size_t count)
1183 {
1184 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1185 						   port_attr);
1186 	struct mlx4_dev *mdev = info->dev;
1187 	enum mlx4_port_type port_type;
1188 	static DEFINE_MUTEX(set_port_type_mutex);
1189 	int err;
1190 
1191 	mutex_lock(&set_port_type_mutex);
1192 
1193 	if (!strcmp(buf, "ib\n")) {
1194 		port_type = MLX4_PORT_TYPE_IB;
1195 	} else if (!strcmp(buf, "eth\n")) {
1196 		port_type = MLX4_PORT_TYPE_ETH;
1197 	} else if (!strcmp(buf, "auto\n")) {
1198 		port_type = MLX4_PORT_TYPE_AUTO;
1199 	} else {
1200 		mlx4_err(mdev, "%s is not supported port type\n", buf);
1201 		err = -EINVAL;
1202 		goto err_out;
1203 	}
1204 
1205 	err = __set_port_type(info, port_type);
1206 
1207 err_out:
1208 	mutex_unlock(&set_port_type_mutex);
1209 
1210 	return err ? err : count;
1211 }
1212 
1213 enum ibta_mtu {
1214 	IB_MTU_256  = 1,
1215 	IB_MTU_512  = 2,
1216 	IB_MTU_1024 = 3,
1217 	IB_MTU_2048 = 4,
1218 	IB_MTU_4096 = 5
1219 };
1220 
1221 static inline int int_to_ibta_mtu(int mtu)
1222 {
1223 	switch (mtu) {
1224 	case 256:  return IB_MTU_256;
1225 	case 512:  return IB_MTU_512;
1226 	case 1024: return IB_MTU_1024;
1227 	case 2048: return IB_MTU_2048;
1228 	case 4096: return IB_MTU_4096;
1229 	default: return -1;
1230 	}
1231 }
1232 
1233 static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
1234 {
1235 	switch (mtu) {
1236 	case IB_MTU_256:  return  256;
1237 	case IB_MTU_512:  return  512;
1238 	case IB_MTU_1024: return 1024;
1239 	case IB_MTU_2048: return 2048;
1240 	case IB_MTU_4096: return 4096;
1241 	default: return -1;
1242 	}
1243 }
1244 
1245 static ssize_t show_port_ib_mtu(struct device *dev,
1246 			     struct device_attribute *attr,
1247 			     char *buf)
1248 {
1249 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1250 						   port_mtu_attr);
1251 	struct mlx4_dev *mdev = info->dev;
1252 
1253 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
1254 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1255 
1256 	sprintf(buf, "%d\n",
1257 			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
1258 	return strlen(buf);
1259 }
1260 
1261 static ssize_t set_port_ib_mtu(struct device *dev,
1262 			     struct device_attribute *attr,
1263 			     const char *buf, size_t count)
1264 {
1265 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1266 						   port_mtu_attr);
1267 	struct mlx4_dev *mdev = info->dev;
1268 	struct mlx4_priv *priv = mlx4_priv(mdev);
1269 	int err, port, mtu, ibta_mtu = -1;
1270 
1271 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
1272 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1273 		return -EINVAL;
1274 	}
1275 
1276 	err = kstrtoint(buf, 0, &mtu);
1277 	if (!err)
1278 		ibta_mtu = int_to_ibta_mtu(mtu);
1279 
1280 	if (err || ibta_mtu < 0) {
1281 		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
1282 		return -EINVAL;
1283 	}
1284 
1285 	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
1286 
1287 	mlx4_stop_sense(mdev);
1288 	mutex_lock(&priv->port_mutex);
1289 	mlx4_unregister_device(mdev);
1290 	for (port = 1; port <= mdev->caps.num_ports; port++) {
1291 		mlx4_CLOSE_PORT(mdev, port);
1292 		err = mlx4_SET_PORT(mdev, port, -1);
1293 		if (err) {
1294 			mlx4_err(mdev, "Failed to set port %d, aborting\n",
1295 				 port);
1296 			goto err_set_port;
1297 		}
1298 	}
1299 	err = mlx4_register_device(mdev);
1300 err_set_port:
1301 	mutex_unlock(&priv->port_mutex);
1302 	mlx4_start_sense(mdev);
1303 	return err ? err : count;
1304 }
1305 
1306 /* bond for multi-function device */
1307 #define MAX_MF_BOND_ALLOWED_SLAVES 63
1308 static int mlx4_mf_bond(struct mlx4_dev *dev)
1309 {
1310 	int err = 0;
1311 	int nvfs;
1312 	struct mlx4_slaves_pport slaves_port1;
1313 	struct mlx4_slaves_pport slaves_port2;
1314 	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
1315 
1316 	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
1317 	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
1318 	bitmap_and(slaves_port_1_2,
1319 		   slaves_port1.slaves, slaves_port2.slaves,
1320 		   dev->persist->num_vfs + 1);
1321 
	/* only single-port VFs are allowed */
1323 	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
1324 		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
1325 		return -EINVAL;
1326 	}
1327 
	/* The number of virtual functions is the total number of functions
	 * minus one physical function per port.
	 */
1331 	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
1332 		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
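	/* e.g. 3 single-port VFs on port 1 and 2 on port 2 give bitmap
	 * weights of 4 and 3 (each including the PF), so nvfs = 4 + 3 - 2 = 5.
	 */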
1333 
1334 	/* limit on maximum allowed VFs */
1335 	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
1336 		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
1337 			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
1338 		return -EINVAL;
1339 	}
1340 
1341 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1342 		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
1343 		return -EINVAL;
1344 	}
1345 
1346 	err = mlx4_bond_mac_table(dev);
1347 	if (err)
1348 		return err;
1349 	err = mlx4_bond_vlan_table(dev);
1350 	if (err)
1351 		goto err1;
1352 	err = mlx4_bond_fs_rules(dev);
1353 	if (err)
1354 		goto err2;
1355 
1356 	return 0;
1357 err2:
1358 	(void)mlx4_unbond_vlan_table(dev);
1359 err1:
1360 	(void)mlx4_unbond_mac_table(dev);
1361 	return err;
1362 }
1363 
1364 static int mlx4_mf_unbond(struct mlx4_dev *dev)
1365 {
1366 	int ret, ret1;
1367 
1368 	ret = mlx4_unbond_fs_rules(dev);
1369 	if (ret)
1370 		mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret);
1371 	ret1 = mlx4_unbond_mac_table(dev);
1372 	if (ret1) {
1373 		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
1374 		ret = ret1;
1375 	}
1376 	ret1 = mlx4_unbond_vlan_table(dev);
1377 	if (ret1) {
1378 		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
1379 		ret = ret1;
1380 	}
1381 	return ret;
1382 }
1383 
1384 int mlx4_bond(struct mlx4_dev *dev)
1385 {
1386 	int ret = 0;
1387 	struct mlx4_priv *priv = mlx4_priv(dev);
1388 
1389 	mutex_lock(&priv->bond_mutex);
1390 
1391 	if (!mlx4_is_bonded(dev)) {
1392 		ret = mlx4_do_bond(dev, true);
1393 		if (ret)
1394 			mlx4_err(dev, "Failed to bond device: %d\n", ret);
1395 		if (!ret && mlx4_is_master(dev)) {
1396 			ret = mlx4_mf_bond(dev);
1397 			if (ret) {
1398 				mlx4_err(dev, "bond for multifunction failed\n");
1399 				mlx4_do_bond(dev, false);
1400 			}
1401 		}
1402 	}
1403 
1404 	mutex_unlock(&priv->bond_mutex);
1405 	if (!ret)
1406 		mlx4_dbg(dev, "Device is bonded\n");
1407 
1408 	return ret;
1409 }
1410 EXPORT_SYMBOL_GPL(mlx4_bond);
1411 
1412 int mlx4_unbond(struct mlx4_dev *dev)
1413 {
1414 	int ret = 0;
1415 	struct mlx4_priv *priv = mlx4_priv(dev);
1416 
1417 	mutex_lock(&priv->bond_mutex);
1418 
1419 	if (mlx4_is_bonded(dev)) {
1420 		int ret2 = 0;
1421 
1422 		ret = mlx4_do_bond(dev, false);
1423 		if (ret)
1424 			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1425 		if (mlx4_is_master(dev))
1426 			ret2 = mlx4_mf_unbond(dev);
1427 		if (ret2) {
1428 			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
1429 			ret = ret2;
1430 		}
1431 	}
1432 
1433 	mutex_unlock(&priv->bond_mutex);
1434 	if (!ret)
1435 		mlx4_dbg(dev, "Device is unbonded\n");
1436 
1437 	return ret;
1438 }
1439 EXPORT_SYMBOL_GPL(mlx4_unbond);
1440 
1441 
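/*
 * Set the virtual-to-physical port mapping, e.g. v2p = {.port1 = 2,
 * .port2 = 2} directs both virtual ports to physical port 2; a zero
 * entry keeps the current mapping, and the cross mapping {2, 1} is
 * rejected below.
 */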
1442 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1443 {
1444 	u8 port1 = v2p->port1;
1445 	u8 port2 = v2p->port2;
1446 	struct mlx4_priv *priv = mlx4_priv(dev);
1447 	int err;
1448 
1449 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1450 		return -EOPNOTSUPP;
1451 
1452 	mutex_lock(&priv->bond_mutex);
1453 
1454 	/* zero means keep current mapping for this port */
1455 	if (port1 == 0)
1456 		port1 = priv->v2p.port1;
1457 	if (port2 == 0)
1458 		port2 = priv->v2p.port2;
1459 
1460 	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1461 	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1462 	    (port1 == 2 && port2 == 1)) {
		/* Besides the boundary checks, cross mapping makes
		 * no sense and is therefore not allowed */
1465 		err = -EINVAL;
1466 	} else if ((port1 == priv->v2p.port1) &&
1467 		 (port2 == priv->v2p.port2)) {
1468 		err = 0;
1469 	} else {
1470 		err = mlx4_virt2phy_port_map(dev, port1, port2);
1471 		if (!err) {
1472 			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1473 				 port1, port2);
1474 			priv->v2p.port1 = port1;
1475 			priv->v2p.port2 = port2;
1476 		} else {
1477 			mlx4_err(dev, "Failed to change port mape: %d\n", err);
1478 		}
1479 	}
1480 
1481 	mutex_unlock(&priv->bond_mutex);
1482 	return err;
1483 }
1484 EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1485 
1486 static int mlx4_load_fw(struct mlx4_dev *dev)
1487 {
1488 	struct mlx4_priv *priv = mlx4_priv(dev);
1489 	int err;
1490 
1491 	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1492 					 GFP_HIGHUSER | __GFP_NOWARN, 0);
1493 	if (!priv->fw.fw_icm) {
1494 		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1495 		return -ENOMEM;
1496 	}
1497 
1498 	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1499 	if (err) {
1500 		mlx4_err(dev, "MAP_FA command failed, aborting\n");
1501 		goto err_free;
1502 	}
1503 
1504 	err = mlx4_RUN_FW(dev);
1505 	if (err) {
1506 		mlx4_err(dev, "RUN_FW command failed, aborting\n");
1507 		goto err_unmap_fa;
1508 	}
1509 
1510 	return 0;
1511 
1512 err_unmap_fa:
1513 	mlx4_UNMAP_FA(dev);
1514 
1515 err_free:
1516 	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1517 	return err;
1518 }
1519 
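/*
 * The cMPT ICM area is one region subdivided by resource type; each
 * type's sub-table starts at
 * cmpt_base + ((u64)(type * cmpt_entry_sz) << MLX4_CMPT_SHIFT).
 */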
1520 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1521 				int cmpt_entry_sz)
1522 {
1523 	struct mlx4_priv *priv = mlx4_priv(dev);
1524 	int err;
1525 	int num_eqs;
1526 
1527 	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1528 				  cmpt_base +
1529 				  ((u64) (MLX4_CMPT_TYPE_QP *
1530 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1531 				  cmpt_entry_sz, dev->caps.num_qps,
1532 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1533 				  0, 0);
1534 	if (err)
1535 		goto err;
1536 
1537 	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1538 				  cmpt_base +
1539 				  ((u64) (MLX4_CMPT_TYPE_SRQ *
1540 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1541 				  cmpt_entry_sz, dev->caps.num_srqs,
1542 				  dev->caps.reserved_srqs, 0, 0);
1543 	if (err)
1544 		goto err_qp;
1545 
1546 	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1547 				  cmpt_base +
1548 				  ((u64) (MLX4_CMPT_TYPE_CQ *
1549 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1550 				  cmpt_entry_sz, dev->caps.num_cqs,
1551 				  dev->caps.reserved_cqs, 0, 0);
1552 	if (err)
1553 		goto err_srq;
1554 
1555 	num_eqs = dev->phys_caps.num_phys_eqs;
1556 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1557 				  cmpt_base +
1558 				  ((u64) (MLX4_CMPT_TYPE_EQ *
1559 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1560 				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
1561 	if (err)
1562 		goto err_cq;
1563 
1564 	return 0;
1565 
1566 err_cq:
1567 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1568 
1569 err_srq:
1570 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1571 
1572 err_qp:
1573 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1574 
1575 err:
1576 	return err;
1577 }
1578 
1579 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1580 			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
1581 {
1582 	struct mlx4_priv *priv = mlx4_priv(dev);
1583 	u64 aux_pages;
1584 	int num_eqs;
1585 	int err;
1586 
1587 	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1588 	if (err) {
1589 		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1590 		return err;
1591 	}
1592 
1593 	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1594 		 (unsigned long long) icm_size >> 10,
1595 		 (unsigned long long) aux_pages << 2);
1596 
1597 	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1598 					  GFP_HIGHUSER | __GFP_NOWARN, 0);
1599 	if (!priv->fw.aux_icm) {
1600 		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1601 		return -ENOMEM;
1602 	}
1603 
1604 	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1605 	if (err) {
1606 		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1607 		goto err_free_aux;
1608 	}
1609 
1610 	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1611 	if (err) {
1612 		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1613 		goto err_unmap_aux;
1614 	}
1615 
1616 
1617 	num_eqs = dev->phys_caps.num_phys_eqs;
1618 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1619 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
1620 				  num_eqs, num_eqs, 0, 0);
1621 	if (err) {
1622 		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1623 		goto err_unmap_cmpt;
1624 	}
1625 
1626 	/*
1627 	 * Reserved MTT entries must be aligned up to a cacheline
1628 	 * boundary, since the FW will write to them, while the driver
1629 	 * writes to all other MTT entries. (The variable
1630 	 * dev->caps.mtt_entry_sz below is really the MTT segment
1631 	 * size, not the raw entry size)
1632 	 */
1633 	dev->caps.reserved_mtts =
1634 		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1635 		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1636 
1637 	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1638 				  init_hca->mtt_base,
1639 				  dev->caps.mtt_entry_sz,
1640 				  dev->caps.num_mtts,
1641 				  dev->caps.reserved_mtts, 1, 0);
1642 	if (err) {
1643 		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1644 		goto err_unmap_eq;
1645 	}
1646 
1647 	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1648 				  init_hca->dmpt_base,
1649 				  dev_cap->dmpt_entry_sz,
1650 				  dev->caps.num_mpts,
1651 				  dev->caps.reserved_mrws, 1, 1);
1652 	if (err) {
1653 		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1654 		goto err_unmap_mtt;
1655 	}
1656 
1657 	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1658 				  init_hca->qpc_base,
1659 				  dev_cap->qpc_entry_sz,
1660 				  dev->caps.num_qps,
1661 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1662 				  0, 0);
1663 	if (err) {
1664 		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1665 		goto err_unmap_dmpt;
1666 	}
1667 
1668 	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1669 				  init_hca->auxc_base,
1670 				  dev_cap->aux_entry_sz,
1671 				  dev->caps.num_qps,
1672 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1673 				  0, 0);
1674 	if (err) {
1675 		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1676 		goto err_unmap_qp;
1677 	}
1678 
1679 	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1680 				  init_hca->altc_base,
1681 				  dev_cap->altc_entry_sz,
1682 				  dev->caps.num_qps,
1683 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1684 				  0, 0);
1685 	if (err) {
1686 		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1687 		goto err_unmap_auxc;
1688 	}
1689 
1690 	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1691 				  init_hca->rdmarc_base,
1692 				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1693 				  dev->caps.num_qps,
1694 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1695 				  0, 0);
1696 	if (err) {
1697 		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1698 		goto err_unmap_altc;
1699 	}
1700 
1701 	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1702 				  init_hca->cqc_base,
1703 				  dev_cap->cqc_entry_sz,
1704 				  dev->caps.num_cqs,
1705 				  dev->caps.reserved_cqs, 0, 0);
1706 	if (err) {
1707 		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1708 		goto err_unmap_rdmarc;
1709 	}
1710 
1711 	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1712 				  init_hca->srqc_base,
1713 				  dev_cap->srq_entry_sz,
1714 				  dev->caps.num_srqs,
1715 				  dev->caps.reserved_srqs, 0, 0);
1716 	if (err) {
1717 		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1718 		goto err_unmap_cq;
1719 	}
1720 
1721 	/*
1722 	 * For flow steering device managed mode it is required to use
1723 	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1724 	 * required, but for simplicity just map the whole multicast
1725 	 * group table now.  The table isn't very big and it's a lot
1726 	 * easier than trying to track ref counts.
1727 	 */
1728 	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1729 				  init_hca->mc_base,
1730 				  mlx4_get_mgm_entry_size(dev),
1731 				  dev->caps.num_mgms + dev->caps.num_amgms,
1732 				  dev->caps.num_mgms + dev->caps.num_amgms,
1733 				  0, 0);
1734 	if (err) {
1735 		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1736 		goto err_unmap_srq;
1737 	}
1738 
1739 	return 0;
1740 
1741 err_unmap_srq:
1742 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1743 
1744 err_unmap_cq:
1745 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1746 
1747 err_unmap_rdmarc:
1748 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1749 
1750 err_unmap_altc:
1751 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1752 
1753 err_unmap_auxc:
1754 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1755 
1756 err_unmap_qp:
1757 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1758 
1759 err_unmap_dmpt:
1760 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1761 
1762 err_unmap_mtt:
1763 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1764 
1765 err_unmap_eq:
1766 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1767 
1768 err_unmap_cmpt:
1769 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1770 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1771 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1772 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1773 
1774 err_unmap_aux:
1775 	mlx4_UNMAP_ICM_AUX(dev);
1776 
1777 err_free_aux:
1778 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1779 
1780 	return err;
1781 }
1782 
1783 static void mlx4_free_icms(struct mlx4_dev *dev)
1784 {
1785 	struct mlx4_priv *priv = mlx4_priv(dev);
1786 
1787 	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1788 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1789 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1790 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1791 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1792 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1793 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1794 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1795 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1796 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1797 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1798 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1799 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1800 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1801 
1802 	mlx4_UNMAP_ICM_AUX(dev);
1803 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1804 }
1805 
1806 static void mlx4_slave_exit(struct mlx4_dev *dev)
1807 {
1808 	struct mlx4_priv *priv = mlx4_priv(dev);
1809 
1810 	mutex_lock(&priv->cmd.slave_cmd_mutex);
1811 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1812 			  MLX4_COMM_TIME))
1813 		mlx4_warn(dev, "Failed to close slave function\n");
1814 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
1815 }
1816 
1817 static int map_bf_area(struct mlx4_dev *dev)
1818 {
1819 	struct mlx4_priv *priv = mlx4_priv(dev);
1820 	resource_size_t bf_start;
1821 	resource_size_t bf_len;
1822 	int err = 0;
1823 
1824 	if (!dev->caps.bf_reg_size)
1825 		return -ENXIO;
1826 
1827 	bf_start = pci_resource_start(dev->persist->pdev, 2) +
1828 			(dev->caps.num_uars << PAGE_SHIFT);
1829 	bf_len = pci_resource_len(dev->persist->pdev, 2) -
1830 			(dev->caps.num_uars << PAGE_SHIFT);
1831 	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1832 	if (!priv->bf_mapping)
1833 		err = -ENOMEM;
1834 
1835 	return err;
1836 }
1837 
1838 static void unmap_bf_area(struct mlx4_dev *dev)
1839 {
1840 	if (mlx4_priv(dev)->bf_mapping)
1841 		io_mapping_free(mlx4_priv(dev)->bf_mapping);
1842 }
1843 
1844 u64 mlx4_read_clock(struct mlx4_dev *dev)
1845 {
1846 	u32 clockhi, clocklo, clockhi1;
1847 	u64 cycles;
1848 	int i;
1849 	struct mlx4_priv *priv = mlx4_priv(dev);
1850 
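	/* The free-running cycle counter is exposed as two 32-bit MMIO
	 * words.  Re-read the high word and retry if it changed, so a
	 * low-word rollover between the reads cannot yield a torn value.
	 */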
1851 	for (i = 0; i < 10; i++) {
1852 		clockhi = swab32(readl(priv->clock_mapping));
1853 		clocklo = swab32(readl(priv->clock_mapping + 4));
1854 		clockhi1 = swab32(readl(priv->clock_mapping));
1855 		if (clockhi == clockhi1)
1856 			break;
1857 	}
1858 
1859 	cycles = (u64) clockhi << 32 | (u64) clocklo;
1860 
1861 	return cycles;
1862 }
1863 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1864 
1866 static int map_internal_clock(struct mlx4_dev *dev)
1867 {
1868 	struct mlx4_priv *priv = mlx4_priv(dev);
1869 
1870 	priv->clock_mapping =
1871 		ioremap(pci_resource_start(dev->persist->pdev,
1872 					   priv->fw.clock_bar) +
1873 			priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1874 
1875 	if (!priv->clock_mapping)
1876 		return -ENOMEM;
1877 
1878 	return 0;
1879 }
1880 
1881 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1882 				   struct mlx4_clock_params *params)
1883 {
1884 	struct mlx4_priv *priv = mlx4_priv(dev);
1885 
1886 	if (mlx4_is_slave(dev))
1887 		return -EOPNOTSUPP;
1888 
1889 	if (!params)
1890 		return -EINVAL;
1891 
1892 	params->bar = priv->fw.clock_bar;
1893 	params->offset = priv->fw.clock_offset;
1894 	params->size = MLX4_CLOCK_SIZE;
1895 
1896 	return 0;
1897 }
1898 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1899 
1900 static void unmap_internal_clock(struct mlx4_dev *dev)
1901 {
1902 	struct mlx4_priv *priv = mlx4_priv(dev);
1903 
1904 	if (priv->clock_mapping)
1905 		iounmap(priv->clock_mapping);
1906 }
1907 
1908 static void mlx4_close_hca(struct mlx4_dev *dev)
1909 {
1910 	unmap_internal_clock(dev);
1911 	unmap_bf_area(dev);
1912 	if (mlx4_is_slave(dev))
1913 		mlx4_slave_exit(dev);
1914 	else {
1915 		mlx4_CLOSE_HCA(dev, 0);
1916 		mlx4_free_icms(dev);
1917 	}
1918 }
1919 
1920 static void mlx4_close_fw(struct mlx4_dev *dev)
1921 {
1922 	if (!mlx4_is_slave(dev)) {
1923 		mlx4_UNMAP_FA(dev);
1924 		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1925 	}
1926 }
1927 
1928 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1929 {
1930 #define COMM_CHAN_OFFLINE_OFFSET 0x09
1931 
1932 	u32 comm_flags;
1933 	u32 offline_bit;
1934 	unsigned long end;
1935 	struct mlx4_priv *priv = mlx4_priv(dev);
1936 
1937 	end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
1938 	while (time_before(jiffies, end)) {
		comm_flags = swab32(readl((char __iomem *)priv->mfunc.comm +
1940 					  MLX4_COMM_CHAN_FLAGS));
1941 		offline_bit = (comm_flags &
1942 			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1943 		if (!offline_bit)
1944 			return 0;
1945 
1946 		/* If device removal has been requested,
1947 		 * do not continue retrying.
1948 		 */
1949 		if (dev->persist->interface_state &
1950 		    MLX4_INTERFACE_STATE_NOWAIT)
1951 			break;
1952 
1953 		/* There are cases as part of AER/Reset flow that PF needs
1954 		 * around 100 msec to load. We therefore sleep for 100 msec
1955 		 * to allow other tasks to make use of that CPU during this
1956 		 * time interval.
1957 		 */
1958 		msleep(100);
1959 	}
1960 	mlx4_err(dev, "Communication channel is offline.\n");
1961 	return -EIO;
1962 }
1963 
1964 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
1965 {
1966 #define COMM_CHAN_RST_OFFSET 0x1e
1967 
1968 	struct mlx4_priv *priv = mlx4_priv(dev);
1969 	u32 comm_rst;
1970 	u32 comm_caps;
1971 
	comm_caps = swab32(readl((char __iomem *)priv->mfunc.comm +
1973 				 MLX4_COMM_CHAN_CAPS));
1974 	comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
1975 
1976 	if (comm_rst)
1977 		dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
1978 }
1979 
1980 static int mlx4_init_slave(struct mlx4_dev *dev)
1981 {
1982 	struct mlx4_priv *priv = mlx4_priv(dev);
1983 	u64 dma = (u64) priv->mfunc.vhcr_dma;
1984 	int ret_from_reset = 0;
1985 	u32 slave_read;
1986 	u32 cmd_channel_ver;
1987 
1988 	if (atomic_read(&pf_loading)) {
1989 		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
1990 		return -EPROBE_DEFER;
1991 	}
1992 
1993 	mutex_lock(&priv->cmd.slave_cmd_mutex);
1994 	priv->cmd.max_cmds = 1;
1995 	if (mlx4_comm_check_offline(dev)) {
1996 		mlx4_err(dev, "PF is not responsive, skipping initialization\n");
1997 		goto err_offline;
1998 	}
1999 
2000 	mlx4_reset_vf_support(dev);
2001 	mlx4_warn(dev, "Sending reset\n");
2002 	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
2003 				       MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
	/* If we are in the middle of FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before giving up.
	 */
2006 	if (ret_from_reset) {
2007 		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
2008 			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
2009 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
2010 			return -EPROBE_DEFER;
2011 		} else
2012 			goto err;
2013 	}
2014 
2015 	/* check the driver version - the slave I/F revision
2016 	 * must match the master's */
2017 	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
2018 	cmd_channel_ver = mlx4_comm_get_version();
2019 
2020 	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
2021 		MLX4_COMM_GET_IF_REV(slave_read)) {
2022 		mlx4_err(dev, "slave driver version is not supported by the master\n");
2023 		goto err;
2024 	}
2025 
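	/* Hand the 64-bit VHCR DMA address to the PF 16 bits at a time,
	 * most-significant chunk first; VHCR_EN carries the low bits and
	 * activates the channel.
	 */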
2026 	mlx4_warn(dev, "Sending vhcr0\n");
2027 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
2028 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2029 		goto err;
2030 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
2031 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2032 		goto err;
2033 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
2034 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2035 		goto err;
2036 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
2037 			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2038 		goto err;
2039 
2040 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
2041 	return 0;
2042 
2043 err:
2044 	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
2045 err_offline:
2046 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
2047 	return -EIO;
2048 }
2049 
2050 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
2051 {
2052 	int i;
2053 
2054 	for (i = 1; i <= dev->caps.num_ports; i++) {
2055 		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
2056 			dev->caps.gid_table_len[i] =
2057 				mlx4_get_slave_num_gids(dev, 0, i);
2058 		else
2059 			dev->caps.gid_table_len[i] = 1;
2060 		dev->caps.pkey_table_len[i] =
2061 			dev->phys_caps.pkey_phys_table_len[i] - 1;
2062 	}
2063 }
2064 
2065 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
2066 {
	int i;
2068 
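	/* An MGM entry of 2^i bytes is laid out in 16-byte units: the
	 * first two hold the entry header and each remaining unit carries
	 * four QPNs, hence the 4 * ((1 << i) / 16 - 2) capacity test.
	 */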
2069 	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
2070 	      i++) {
2071 		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
2072 			break;
2073 	}
2074 
2075 	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
2076 }
2077 
2078 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
2079 {
2080 	switch (dmfs_high_steer_mode) {
2081 	case MLX4_STEERING_DMFS_A0_DEFAULT:
2082 		return "default performance";
2083 
2084 	case MLX4_STEERING_DMFS_A0_DYNAMIC:
2085 		return "dynamic hybrid mode";
2086 
2087 	case MLX4_STEERING_DMFS_A0_STATIC:
2088 		return "performance optimized for limited rule configuration (static)";
2089 
2090 	case MLX4_STEERING_DMFS_A0_DISABLE:
2091 		return "disabled performance optimized steering";
2092 
2093 	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
2094 		return "performance optimized steering not supported";
2095 
2096 	default:
2097 		return "Unrecognized mode";
2098 	}
2099 }
2100 
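/* A non-positive log_num_mgm_entry_size module parameter is interpreted
 * as a bit mask of steering options; this bit asks for the DMFS A0
 * static (performance-optimized) mode.
 */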
2101 #define MLX4_DMFS_A0_STEERING			(1UL << 2)
2102 
2103 static void choose_steering_mode(struct mlx4_dev *dev,
2104 				 struct mlx4_dev_cap *dev_cap)
2105 {
2106 	if (mlx4_log_num_mgm_entry_size <= 0) {
2107 		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
2108 			if (dev->caps.dmfs_high_steer_mode ==
2109 			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2110 				mlx4_err(dev, "DMFS high rate mode not supported\n");
2111 			else
2112 				dev->caps.dmfs_high_steer_mode =
2113 					MLX4_STEERING_DMFS_A0_STATIC;
2114 		}
2115 	}
2116 
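	/* Device-managed flow steering is used only when the modparam does
	 * not force an entry size, the FW advertises FS_EN, every function
	 * (PF plus VFs) fits into one MGM entry, and a large enough entry
	 * size exists.
	 */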
2117 	if (mlx4_log_num_mgm_entry_size <= 0 &&
2118 	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
2119 	    (!mlx4_is_mfunc(dev) ||
2120 	     (dev_cap->fs_max_num_qp_per_entry >=
2121 	     (dev->persist->num_vfs + 1))) &&
2122 	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
2123 		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
2124 		dev->oper_log_mgm_entry_size =
2125 			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
2126 		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
2127 		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
2128 		dev->caps.fs_log_max_ucast_qp_range_size =
2129 			dev_cap->fs_log_max_ucast_qp_range_size;
2130 	} else {
2131 		if (dev->caps.dmfs_high_steer_mode !=
2132 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2133 			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
2134 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
2135 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2136 			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
2137 		else {
2138 			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
2139 
2140 			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
2141 			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2142 				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
2143 		}
2144 		dev->oper_log_mgm_entry_size =
2145 			mlx4_log_num_mgm_entry_size > 0 ?
2146 			mlx4_log_num_mgm_entry_size :
2147 			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
2148 		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
2149 	}
2150 	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
2151 		 mlx4_steering_mode_str(dev->caps.steering_mode),
2152 		 dev->oper_log_mgm_entry_size,
2153 		 mlx4_log_num_mgm_entry_size);
2154 }
2155 
2156 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
2157 				       struct mlx4_dev_cap *dev_cap)
2158 {
2159 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2160 	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
2161 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
2162 	else
2163 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
2164 
2165 	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",  (dev->caps.tunnel_offload_mode
2166 		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
2167 }
2168 
2169 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2170 {
2171 	int i;
2172 	struct mlx4_port_cap port_cap;
2173 
2174 	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2175 		return -EINVAL;
2176 
2177 	for (i = 1; i <= dev->caps.num_ports; i++) {
2178 		if (mlx4_dev_port(dev, i, &port_cap)) {
2179 			mlx4_err(dev,
2180 				 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
2181 		} else if ((dev->caps.dmfs_high_steer_mode !=
2182 			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
2183 			   (port_cap.dmfs_optimized_state ==
2184 			    !!(dev->caps.dmfs_high_steer_mode ==
2185 			    MLX4_STEERING_DMFS_A0_DISABLE))) {
2186 			mlx4_err(dev,
2187 				 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
2188 				 dmfs_high_rate_steering_mode_str(
2189 					dev->caps.dmfs_high_steer_mode),
2190 				 (port_cap.dmfs_optimized_state ?
2191 					"enabled" : "disabled"));
2192 		}
2193 	}
2194 
2195 	return 0;
2196 }
2197 
2198 static int mlx4_init_fw(struct mlx4_dev *dev)
2199 {
2200 	struct mlx4_mod_stat_cfg   mlx4_cfg;
2201 	int err = 0;
2202 
2203 	if (!mlx4_is_slave(dev)) {
2204 		err = mlx4_QUERY_FW(dev);
2205 		if (err) {
2206 			if (err == -EACCES)
2207 				mlx4_info(dev, "non-primary physical function, skipping\n");
2208 			else
2209 				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2210 			return err;
2211 		}
2212 
2213 		err = mlx4_load_fw(dev);
2214 		if (err) {
2215 			mlx4_err(dev, "Failed to start FW, aborting\n");
2216 			return err;
2217 		}
2218 
2219 		mlx4_cfg.log_pg_sz_m = 1;
2220 		mlx4_cfg.log_pg_sz = 0;
2221 		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2222 		if (err)
2223 			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2224 	}
2225 
2226 	return err;
2227 }
2228 
2229 static int mlx4_init_hca(struct mlx4_dev *dev)
2230 {
2231 	struct mlx4_priv	  *priv = mlx4_priv(dev);
2232 	struct mlx4_adapter	   adapter;
2233 	struct mlx4_dev_cap	   dev_cap;
2234 	struct mlx4_profile	   profile;
2235 	struct mlx4_init_hca_param init_hca;
2236 	u64 icm_size;
2237 	struct mlx4_config_dev_params params;
2238 	int err;
2239 
2240 	if (!mlx4_is_slave(dev)) {
2241 		err = mlx4_dev_cap(dev, &dev_cap);
2242 		if (err) {
2243 			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2244 			return err;
2245 		}
2246 
2247 		choose_steering_mode(dev, &dev_cap);
2248 		choose_tunnel_offload_mode(dev, &dev_cap);
2249 
2250 		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2251 		    mlx4_is_master(dev))
2252 			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2253 
2254 		err = mlx4_get_phys_port_id(dev);
2255 		if (err)
2256 			mlx4_err(dev, "Fail to get physical port id\n");
2257 
2258 		if (mlx4_is_master(dev))
2259 			mlx4_parav_master_pf_caps(dev);
2260 
2261 		if (mlx4_low_memory_profile()) {
2262 			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2263 			profile = low_mem_profile;
2264 		} else {
2265 			profile = default_profile;
2266 		}
2267 		if (dev->caps.steering_mode ==
2268 		    MLX4_STEERING_MODE_DEVICE_MANAGED)
2269 			profile.num_mcg = MLX4_FS_NUM_MCG;
2270 
2271 		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
2272 					     &init_hca);
2273 		if ((long long) icm_size < 0) {
2274 			err = icm_size;
2275 			return err;
2276 		}
2277 
2278 		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2279 
2280 		if (enable_4k_uar) {
2281 			init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2282 						    PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
2283 			init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2284 		} else {
2285 			init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
2286 			init_hca.uar_page_sz = PAGE_SHIFT - 12;
2287 		}
2288 
2289 		init_hca.mw_enabled = 0;
2290 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2291 		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2292 			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2293 
2294 		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
2295 		if (err)
2296 			return err;
2297 
2298 		err = mlx4_INIT_HCA(dev, &init_hca);
2299 		if (err) {
2300 			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2301 			goto err_free_icm;
2302 		}
2303 
2304 		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2305 			err = mlx4_query_func(dev, &dev_cap);
2306 			if (err < 0) {
2307 				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2308 				goto err_close;
2309 			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2310 				dev->caps.num_eqs = dev_cap.max_eqs;
2311 				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
2312 				dev->caps.reserved_uars = dev_cap.reserved_uars;
2313 			}
2314 		}
2315 
		/*
		 * If TS is supported by the FW, read the HCA frequency
		 * with the QUERY_HCA command.
		 */
2320 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2321 			memset(&init_hca, 0, sizeof(init_hca));
2322 			err = mlx4_QUERY_HCA(dev, &init_hca);
2323 			if (err) {
2324 				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
2325 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2326 			} else {
2327 				dev->caps.hca_core_clock =
2328 					init_hca.hca_core_clock;
2329 			}
2330 
2331 			/* In case we got HCA frequency 0 - disable timestamping
2332 			 * to avoid dividing by zero
2333 			 */
2334 			if (!dev->caps.hca_core_clock) {
2335 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2336 				mlx4_err(dev,
2337 					 "HCA frequency is 0 - timestamping is not supported\n");
2338 			} else if (map_internal_clock(dev)) {
				/*
				 * Mapping the internal clock failed;
				 * disable timestamping.
				 */
2343 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2344 				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2345 			}
2346 		}
2347 
2348 		if (dev->caps.dmfs_high_steer_mode !=
2349 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2350 			if (mlx4_validate_optimized_steering(dev))
2351 				mlx4_warn(dev, "Optimized steering validation failed\n");
2352 
2353 			if (dev->caps.dmfs_high_steer_mode ==
2354 			    MLX4_STEERING_DMFS_A0_DISABLE) {
2355 				dev->caps.dmfs_high_rate_qpn_base =
2356 					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2357 				dev->caps.dmfs_high_rate_qpn_range =
2358 					MLX4_A0_STEERING_TABLE_SIZE;
2359 			}
2360 
2361 			mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
2362 				  dmfs_high_rate_steering_mode_str(
2363 					dev->caps.dmfs_high_steer_mode));
2364 		}
2365 	} else {
2366 		err = mlx4_init_slave(dev);
2367 		if (err) {
2368 			if (err != -EPROBE_DEFER)
2369 				mlx4_err(dev, "Failed to initialize slave\n");
2370 			return err;
2371 		}
2372 
2373 		err = mlx4_slave_cap(dev);
2374 		if (err) {
2375 			mlx4_err(dev, "Failed to obtain slave caps\n");
2376 			goto err_close;
2377 		}
2378 	}
2379 
2380 	if (map_bf_area(dev))
2381 		mlx4_dbg(dev, "Failed to map blue flame area\n");
2382 
	/* Only the master sets the ports; all other functions get them from it. */
2384 	if (!mlx4_is_slave(dev))
2385 		mlx4_set_port_mask(dev);
2386 
2387 	err = mlx4_QUERY_ADAPTER(dev, &adapter);
2388 	if (err) {
2389 		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2390 		goto unmap_bf;
2391 	}
2392 
2393 	/* Query CONFIG_DEV parameters */
2394 	err = mlx4_config_dev_retrieval(dev, &params);
2395 	if (err && err != -EOPNOTSUPP) {
2396 		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2397 	} else if (!err) {
2398 		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2399 		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2400 	}
2401 	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
2403 
2404 	return 0;
2405 
2406 unmap_bf:
2407 	unmap_internal_clock(dev);
2408 	unmap_bf_area(dev);
2409 
2410 	if (mlx4_is_slave(dev)) {
2411 		kfree(dev->caps.qp0_qkey);
2412 		kfree(dev->caps.qp0_tunnel);
2413 		kfree(dev->caps.qp0_proxy);
2414 		kfree(dev->caps.qp1_tunnel);
2415 		kfree(dev->caps.qp1_proxy);
2416 	}
2417 
2418 err_close:
2419 	if (mlx4_is_slave(dev))
2420 		mlx4_slave_exit(dev);
2421 	else
2422 		mlx4_CLOSE_HCA(dev, 0);
2423 
2424 err_free_icm:
2425 	if (!mlx4_is_slave(dev))
2426 		mlx4_free_icms(dev);
2427 
2428 	return err;
2429 }
2430 
2431 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2432 {
2433 	struct mlx4_priv *priv = mlx4_priv(dev);
2434 	int nent_pow2;
2435 
2436 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2437 		return -ENOENT;
2438 
2439 	if (!dev->caps.max_counters)
2440 		return -ENOSPC;
2441 
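	/* mlx4_bitmap_init() needs a power-of-two size; the rounding slack
	 * plus the final real counter index are handed back as reserved_top
	 * so they are never allocated.
	 */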
2442 	nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2443 	/* reserve last counter index for sink counter */
2444 	return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2445 				nent_pow2 - 1, 0,
2446 				nent_pow2 - dev->caps.max_counters + 1);
2447 }
2448 
2449 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2450 {
2451 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2452 		return;
2453 
2454 	if (!dev->caps.max_counters)
2455 		return;
2456 
2457 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2458 }
2459 
2460 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2461 {
2462 	struct mlx4_priv *priv = mlx4_priv(dev);
2463 	int port;
2464 
2465 	for (port = 0; port < dev->caps.num_ports; port++)
2466 		if (priv->def_counter[port] != -1)
2467 			mlx4_counter_free(dev,  priv->def_counter[port]);
2468 }
2469 
2470 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2471 {
2472 	struct mlx4_priv *priv = mlx4_priv(dev);
2473 	int port, err = 0;
2474 	u32 idx;
2475 
2476 	for (port = 0; port < dev->caps.num_ports; port++)
2477 		priv->def_counter[port] = -1;
2478 
2479 	for (port = 0; port < dev->caps.num_ports; port++) {
2480 		err = mlx4_counter_alloc(dev, &idx);
2481 
2482 		if (!err || err == -ENOSPC) {
2483 			priv->def_counter[port] = idx;
2484 		} else if (err == -ENOENT) {
2485 			err = 0;
2486 			continue;
2487 		} else if (mlx4_is_slave(dev) && err == -EINVAL) {
2488 			priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2489 			mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2490 				  MLX4_SINK_COUNTER_INDEX(dev));
2491 			err = 0;
2492 		} else {
2493 			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2494 				 __func__, port + 1, err);
2495 			mlx4_cleanup_default_counters(dev);
2496 			return err;
2497 		}
2498 
2499 		mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2500 			 __func__, priv->def_counter[port], port + 1);
2501 	}
2502 
2503 	return err;
2504 }
2505 
2506 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2507 {
2508 	struct mlx4_priv *priv = mlx4_priv(dev);
2509 
2510 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2511 		return -ENOENT;
2512 
2513 	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
2514 	if (*idx == -1) {
2515 		*idx = MLX4_SINK_COUNTER_INDEX(dev);
2516 		return -ENOSPC;
2517 	}
2518 
2519 	return 0;
2520 }
2521 
2522 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2523 {
2524 	u64 out_param;
2525 	int err;
2526 
2527 	if (mlx4_is_mfunc(dev)) {
2528 		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
2529 				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2530 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2531 		if (!err)
2532 			*idx = get_param_l(&out_param);
2533 
2534 		return err;
2535 	}
2536 	return __mlx4_counter_alloc(dev, idx);
2537 }
2538 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2539 
2540 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2541 				u8 counter_index)
2542 {
2543 	struct mlx4_cmd_mailbox *if_stat_mailbox;
2544 	int err;
2545 	u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
2546 
2547 	if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2548 	if (IS_ERR(if_stat_mailbox))
2549 		return PTR_ERR(if_stat_mailbox);
2550 
2551 	err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2552 			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2553 			   MLX4_CMD_NATIVE);
2554 
2555 	mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2556 	return err;
2557 }
2558 
2559 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2560 {
2561 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2562 		return;
2563 
2564 	if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2565 		return;
2566 
2567 	__mlx4_clear_if_stat(dev, idx);
2568 
2569 	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2571 }
2572 
2573 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2574 {
2575 	u64 in_param = 0;
2576 
2577 	if (mlx4_is_mfunc(dev)) {
2578 		set_param_l(&in_param, idx);
2579 		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2580 			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2581 			 MLX4_CMD_WRAPPED);
2582 		return;
2583 	}
2584 	__mlx4_counter_free(dev, idx);
2585 }
2586 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2587 
2588 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2589 {
2590 	struct mlx4_priv *priv = mlx4_priv(dev);
2591 
2592 	return priv->def_counter[port - 1];
2593 }
2594 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2595 
2596 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2597 {
2598 	struct mlx4_priv *priv = mlx4_priv(dev);
2599 
2600 	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2601 }
2602 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2603 
2604 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2605 {
2606 	struct mlx4_priv *priv = mlx4_priv(dev);
2607 
2608 	return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2609 }
2610 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2611 
2612 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2613 {
2614 	struct mlx4_priv *priv = mlx4_priv(dev);
2615 	__be64 guid;
2616 
2617 	/* hw GUID */
2618 	if (entry == 0)
2619 		return;
2620 
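	/* The bit fiddling below presumably steers the random GUID away
	 * from hardware-assigned values: bit 56 of the big-endian value is
	 * cleared and bit 57 is set to mark it as locally administered.
	 */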
2621 	get_random_bytes((char *)&guid, sizeof(guid));
2622 	guid &= ~(cpu_to_be64(1ULL << 56));
2623 	guid |= cpu_to_be64(1ULL << 57);
2624 	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2625 }
2626 
2627 static int mlx4_setup_hca(struct mlx4_dev *dev)
2628 {
2629 	struct mlx4_priv *priv = mlx4_priv(dev);
2630 	int err;
2631 	int port;
2632 	__be32 ib_port_default_caps;
2633 
2634 	err = mlx4_init_uar_table(dev);
2635 	if (err) {
2636 		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2637 		return err;
2638 	}
2639 
2640 	err = mlx4_uar_alloc(dev, &priv->driver_uar);
2641 	if (err) {
2642 		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2643 		goto err_uar_table_free;
2644 	}
2645 
2646 	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2647 	if (!priv->kar) {
2648 		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2649 		err = -ENOMEM;
2650 		goto err_uar_free;
2651 	}
2652 
2653 	err = mlx4_init_pd_table(dev);
2654 	if (err) {
2655 		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2656 		goto err_kar_unmap;
2657 	}
2658 
2659 	err = mlx4_init_xrcd_table(dev);
2660 	if (err) {
2661 		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2662 		goto err_pd_table_free;
2663 	}
2664 
2665 	err = mlx4_init_mr_table(dev);
2666 	if (err) {
2667 		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2668 		goto err_xrcd_table_free;
2669 	}
2670 
2671 	if (!mlx4_is_slave(dev)) {
2672 		err = mlx4_init_mcg_table(dev);
2673 		if (err) {
2674 			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2675 			goto err_mr_table_free;
2676 		}
2677 		err = mlx4_config_mad_demux(dev);
2678 		if (err) {
2679 			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2680 			goto err_mcg_table_free;
2681 		}
2682 	}
2683 
2684 	err = mlx4_init_eq_table(dev);
2685 	if (err) {
2686 		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2687 		goto err_mcg_table_free;
2688 	}
2689 
2690 	err = mlx4_cmd_use_events(dev);
2691 	if (err) {
2692 		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2693 		goto err_eq_table_free;
2694 	}
2695 
2696 	err = mlx4_NOP(dev);
2697 	if (err) {
2698 		if (dev->flags & MLX4_FLAG_MSI_X) {
2699 			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
2700 				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2701 			mlx4_warn(dev, "Trying again without MSI-X\n");
2702 		} else {
2703 			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2704 				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2705 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2706 		}
2707 
2708 		goto err_cmd_poll;
2709 	}
2710 
2711 	mlx4_dbg(dev, "NOP command IRQ test passed\n");
2712 
2713 	err = mlx4_init_cq_table(dev);
2714 	if (err) {
2715 		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2716 		goto err_cmd_poll;
2717 	}
2718 
2719 	err = mlx4_init_srq_table(dev);
2720 	if (err) {
2721 		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2722 		goto err_cq_table_free;
2723 	}
2724 
2725 	err = mlx4_init_qp_table(dev);
2726 	if (err) {
2727 		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2728 		goto err_srq_table_free;
2729 	}
2730 
2731 	if (!mlx4_is_slave(dev)) {
2732 		err = mlx4_init_counters_table(dev);
2733 		if (err && err != -ENOENT) {
2734 			mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2735 			goto err_qp_table_free;
2736 		}
2737 	}
2738 
2739 	err = mlx4_allocate_default_counters(dev);
2740 	if (err) {
2741 		mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2742 		goto err_counters_table_free;
2743 	}
2744 
2745 	if (!mlx4_is_slave(dev)) {
2746 		for (port = 1; port <= dev->caps.num_ports; port++) {
2747 			ib_port_default_caps = 0;
2748 			err = mlx4_get_port_ib_caps(dev, port,
2749 						    &ib_port_default_caps);
2750 			if (err)
2751 				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2752 					  port, err);
2753 			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2754 
2755 			/* initialize per-slave default ib port capabilities */
2756 			if (mlx4_is_master(dev)) {
2757 				int i;
2758 				for (i = 0; i < dev->num_slaves; i++) {
2759 					if (i == mlx4_master_func_num(dev))
2760 						continue;
2761 					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2762 						ib_port_default_caps;
2763 				}
2764 			}
2765 
2766 			if (mlx4_is_mfunc(dev))
2767 				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2768 			else
2769 				dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2770 
2771 			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2772 					    dev->caps.pkey_table_len[port] : -1);
2773 			if (err) {
2774 				mlx4_err(dev, "Failed to set port %d, aborting\n",
2775 					 port);
				goto err_default_counters_free;
2777 			}
2778 		}
2779 	}
2780 
2781 	return 0;
2782 
err_default_counters_free:
2784 	mlx4_cleanup_default_counters(dev);
2785 
2786 err_counters_table_free:
2787 	if (!mlx4_is_slave(dev))
2788 		mlx4_cleanup_counters_table(dev);
2789 
2790 err_qp_table_free:
2791 	mlx4_cleanup_qp_table(dev);
2792 
2793 err_srq_table_free:
2794 	mlx4_cleanup_srq_table(dev);
2795 
2796 err_cq_table_free:
2797 	mlx4_cleanup_cq_table(dev);
2798 
2799 err_cmd_poll:
2800 	mlx4_cmd_use_polling(dev);
2801 
2802 err_eq_table_free:
2803 	mlx4_cleanup_eq_table(dev);
2804 
2805 err_mcg_table_free:
2806 	if (!mlx4_is_slave(dev))
2807 		mlx4_cleanup_mcg_table(dev);
2808 
2809 err_mr_table_free:
2810 	mlx4_cleanup_mr_table(dev);
2811 
2812 err_xrcd_table_free:
2813 	mlx4_cleanup_xrcd_table(dev);
2814 
2815 err_pd_table_free:
2816 	mlx4_cleanup_pd_table(dev);
2817 
2818 err_kar_unmap:
2819 	iounmap(priv->kar);
2820 
2821 err_uar_free:
2822 	mlx4_uar_free(dev, &priv->driver_uar);
2823 
2824 err_uar_table_free:
2825 	mlx4_cleanup_uar_table(dev);
2826 	return err;
2827 }
2828 
2829 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2830 {
2831 	int requested_cpu = 0;
2832 	struct mlx4_priv *priv = mlx4_priv(dev);
2833 	struct mlx4_eq *eq;
2834 	int off = 0;
2835 	int i;
2836 
2837 	if (eqn > dev->caps.num_comp_vectors)
2838 		return -EINVAL;
2839 
2840 	for (i = 1; i < port; i++)
2841 		off += mlx4_get_eqs_per_port(dev, i);
2842 
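	/* Translate the EQ number into a zero-based CPU index for this
	 * port: drop the EQs owned by lower-numbered ports and the slot
	 * taken by the asynchronous EQ.
	 */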
2843 	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
2844 
2845 	/* Meaning EQs are shared, and this call comes from the second port */
2846 	if (requested_cpu < 0)
2847 		return 0;
2848 
2849 	eq = &priv->eq_table.eq[eqn];
2850 
2851 	if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
2852 		return -ENOMEM;
2853 
2854 	cpumask_set_cpu(requested_cpu, eq->affinity_mask);
2855 
2856 	return 0;
2857 }
2858 
2859 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2860 {
2861 	struct mlx4_priv *priv = mlx4_priv(dev);
2862 	struct msix_entry *entries;
2863 	int i;
2864 	int port = 0;
2865 
2866 	if (msi_x) {
2867 		int nreq = min3(dev->caps.num_ports *
2868 				(int)num_online_cpus() + 1,
2869 				dev->caps.num_eqs - dev->caps.reserved_eqs,
2870 				MAX_MSIX);
2871 
		entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
2873 		if (!entries)
2874 			goto no_msi;
2875 
2876 		for (i = 0; i < nreq; ++i)
2877 			entries[i].entry = i;
2878 
2879 		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2880 					     nreq);
2881 
2882 		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
2883 			kfree(entries);
2884 			goto no_msi;
2885 		}
		/* 1 is reserved for events (asynchronous EQ) */
2887 		dev->caps.num_comp_vectors = nreq - 1;
2888 
2889 		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2890 		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2891 			    dev->caps.num_ports);
2892 
2893 		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2894 			if (i == MLX4_EQ_ASYNC)
2895 				continue;
2896 
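			/* entries[0] went to the asynchronous EQ above;
			 * skip over that vector when wiring the remaining
			 * MSI-X vectors to the completion EQs.
			 */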
2897 			priv->eq_table.eq[i].irq =
2898 				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2899 
2900 			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2901 				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2902 					    dev->caps.num_ports);
2903 				/* We don't set affinity hint when there
2904 				 * aren't enough EQs
2905 				 */
2906 			} else {
2907 				set_bit(port,
2908 					priv->eq_table.eq[i].actv_ports.ports);
2909 				if (mlx4_init_affinity_hint(dev, port + 1, i))
2910 					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
2911 						  i);
2912 			}
			/* We divide the EQs evenly between the two ports.
			 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
			 * is the number of EQs per port (i.e. eqs_per_port).
			 * Theoretically, we would like to write something
			 * like (i + 1) % eqs_per_port == 0. However, since
			 * there's an asynchronous EQ, we have to skip over
			 * it by comparing this condition to
			 * !!((i + 1) > MLX4_EQ_ASYNC).
			 */
2922 			if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
2923 			    ((i + 1) %
2924 			     (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
2925 			    !!((i + 1) > MLX4_EQ_ASYNC))
2926 				/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
2927 				 * everything is shared anyway.
2928 				 */
2929 				port++;
2930 		}
2931 
2932 		dev->flags |= MLX4_FLAG_MSI_X;
2933 
2934 		kfree(entries);
2935 		return;
2936 	}
2937 
2938 no_msi:
2939 	dev->caps.num_comp_vectors = 1;
2940 
2941 	BUG_ON(MLX4_EQ_ASYNC >= 2);
2942 	for (i = 0; i < 2; ++i) {
2943 		priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
2944 		if (i != MLX4_EQ_ASYNC) {
2945 			bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2946 				    dev->caps.num_ports);
2947 		}
2948 	}
2949 }
2950 
2951 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2952 {
2953 	struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
2954 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
2955 	int err;
2956 
2957 	err = devlink_port_register(devlink, &info->devlink_port, port);
2958 	if (err)
2959 		return err;
2960 
2961 	info->dev = dev;
2962 	info->port = port;
2963 	if (!mlx4_is_slave(dev)) {
2964 		mlx4_init_mac_table(dev, &info->mac_table);
2965 		mlx4_init_vlan_table(dev, &info->vlan_table);
2966 		mlx4_init_roce_gid_table(dev, &info->gid_table);
2967 		info->base_qpn = mlx4_get_base_qpn(dev, port);
2968 	}
2969 
2970 	sprintf(info->dev_name, "mlx4_port%d", port);
2971 	info->port_attr.attr.name = info->dev_name;
2972 	if (mlx4_is_mfunc(dev))
2973 		info->port_attr.attr.mode = S_IRUGO;
2974 	else {
2975 		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2976 		info->port_attr.store     = set_port_type;
2977 	}
2978 	info->port_attr.show      = show_port_type;
2979 	sysfs_attr_init(&info->port_attr.attr);
2980 
2981 	err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
2982 	if (err) {
2983 		mlx4_err(dev, "Failed to create file for port %d\n", port);
2984 		devlink_port_unregister(&info->devlink_port);
2985 		info->port = -1;
2986 	}
2987 
2988 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2989 	info->port_mtu_attr.attr.name = info->dev_mtu_name;
2990 	if (mlx4_is_mfunc(dev))
2991 		info->port_mtu_attr.attr.mode = S_IRUGO;
2992 	else {
2993 		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2994 		info->port_mtu_attr.store     = set_port_ib_mtu;
2995 	}
2996 	info->port_mtu_attr.show      = show_port_ib_mtu;
2997 	sysfs_attr_init(&info->port_mtu_attr.attr);
2998 
2999 	err = device_create_file(&dev->persist->pdev->dev,
3000 				 &info->port_mtu_attr);
3001 	if (err) {
3002 		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
3003 		device_remove_file(&info->dev->persist->pdev->dev,
3004 				   &info->port_attr);
3005 		devlink_port_unregister(&info->devlink_port);
3006 		info->port = -1;
3007 	}
3008 
3009 	return err;
3010 }
3011 
3012 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
3013 {
3014 	if (info->port < 0)
3015 		return;
3016 
3017 	device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
3018 	device_remove_file(&info->dev->persist->pdev->dev,
3019 			   &info->port_mtu_attr);
3020 	devlink_port_unregister(&info->devlink_port);
3021 
3022 #ifdef CONFIG_RFS_ACCEL
3023 	free_irq_cpu_rmap(info->rmap);
3024 	info->rmap = NULL;
3025 #endif
3026 }
3027 
3028 static int mlx4_init_steering(struct mlx4_dev *dev)
3029 {
3030 	struct mlx4_priv *priv = mlx4_priv(dev);
3031 	int num_entries = dev->caps.num_ports;
3032 	int i, j;
3033 
	priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
			      GFP_KERNEL);
3035 	if (!priv->steer)
3036 		return -ENOMEM;
3037 
3038 	for (i = 0; i < num_entries; i++)
3039 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
3040 			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
3041 			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
3042 		}
3043 	return 0;
3044 }
3045 
3046 static void mlx4_clear_steering(struct mlx4_dev *dev)
3047 {
3048 	struct mlx4_priv *priv = mlx4_priv(dev);
3049 	struct mlx4_steer_index *entry, *tmp_entry;
3050 	struct mlx4_promisc_qp *pqp, *tmp_pqp;
3051 	int num_entries = dev->caps.num_ports;
3052 	int i, j;
3053 
3054 	for (i = 0; i < num_entries; i++) {
3055 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
3056 			list_for_each_entry_safe(pqp, tmp_pqp,
3057 						 &priv->steer[i].promisc_qps[j],
3058 						 list) {
3059 				list_del(&pqp->list);
3060 				kfree(pqp);
3061 			}
3062 			list_for_each_entry_safe(entry, tmp_entry,
3063 						 &priv->steer[i].steer_entries[j],
3064 						 list) {
3065 				list_del(&entry->list);
3066 				list_for_each_entry_safe(pqp, tmp_pqp,
3067 							 &entry->duplicates,
3068 							 list) {
3069 					list_del(&pqp->list);
3070 					kfree(pqp);
3071 				}
3072 				kfree(entry);
3073 			}
3074 		}
3075 	}
3076 	kfree(priv->steer);
3077 }
3078 
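/* Recover the flat 8-bit function number (devfn) from its slot/function
 * split; SR-IOV VFs can spill past function 7 into what enumerates as
 * higher slot numbers.
 */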
3079 static int extended_func_num(struct pci_dev *pdev)
3080 {
3081 	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
3082 }
3083 
3084 #define MLX4_OWNER_BASE	0x8069c
3085 #define MLX4_OWNER_SIZE	4
3086 
3087 static int mlx4_get_ownership(struct mlx4_dev *dev)
3088 {
3089 	void __iomem *owner;
3090 	u32 ret;
3091 
3092 	if (pci_channel_offline(dev->persist->pdev))
3093 		return -EIO;
3094 
3095 	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3096 			MLX4_OWNER_BASE,
3097 			MLX4_OWNER_SIZE);
3098 	if (!owner) {
3099 		mlx4_err(dev, "Failed to obtain ownership bit\n");
3100 		return -ENOMEM;
3101 	}
3102 
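	/* Reading the ownership semaphore acquires it when the read returns
	 * zero; a non-zero value means another function already holds the
	 * device (ownership is released by writing 0 back, see
	 * mlx4_free_ownership()).
	 */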
3103 	ret = readl(owner);
3104 	iounmap(owner);
3105 	return (int) !!ret;
3106 }
3107 
3108 static void mlx4_free_ownership(struct mlx4_dev *dev)
3109 {
3110 	void __iomem *owner;
3111 
3112 	if (pci_channel_offline(dev->persist->pdev))
3113 		return;
3114 
3115 	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3116 			MLX4_OWNER_BASE,
3117 			MLX4_OWNER_SIZE);
3118 	if (!owner) {
3119 		mlx4_err(dev, "Failed to obtain ownership bit\n");
3120 		return;
3121 	}
3122 	writel(0, owner);
3123 	msleep(1000);
3124 	iounmap(owner);
3125 }
3126 
3127 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV)	==\
3128 				  !!((flags) & MLX4_FLAG_MASTER))
3129 
3130 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
3131 			     u8 total_vfs, int existing_vfs, int reset_flow)
3132 {
3133 	u64 dev_flags = dev->flags;
3134 	int err = 0;
3135 	int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
3136 					MLX4_MAX_NUM_VF);
3137 
3138 	if (reset_flow) {
3139 		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
3140 				       GFP_KERNEL);
3141 		if (!dev->dev_vfs)
3142 			goto free_mem;
3143 		return dev_flags;
3144 	}
3145 
3146 	atomic_inc(&pf_loading);
3147 	if (dev->flags &  MLX4_FLAG_SRIOV) {
3148 		if (existing_vfs != total_vfs) {
3149 			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
3150 				 existing_vfs, total_vfs);
3151 			total_vfs = existing_vfs;
3152 		}
3153 	}
3154 
	dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (!dev->dev_vfs) {
3157 		mlx4_err(dev, "Failed to allocate memory for VFs\n");
3158 		goto disable_sriov;
3159 	}
3160 
3161 	if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
3162 		if (total_vfs > fw_enabled_sriov_vfs) {
3163 			mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
3164 				 total_vfs, fw_enabled_sriov_vfs);
3165 			err = -ENOMEM;
3166 			goto disable_sriov;
3167 		}
3168 		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
3169 		err = pci_enable_sriov(pdev, total_vfs);
3170 	}
3171 	if (err) {
3172 		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
3173 			 err);
3174 		goto disable_sriov;
3175 	} else {
3176 		mlx4_warn(dev, "Running in master mode\n");
3177 		dev_flags |= MLX4_FLAG_SRIOV |
3178 			MLX4_FLAG_MASTER;
3179 		dev_flags &= ~MLX4_FLAG_SLAVE;
3180 		dev->persist->num_vfs = total_vfs;
3181 	}
3182 	return dev_flags;
3183 
3184 disable_sriov:
3185 	atomic_dec(&pf_loading);
3186 free_mem:
3187 	dev->persist->num_vfs = 0;
3188 	kfree(dev->dev_vfs);
	dev->dev_vfs = NULL;
3190 	return dev_flags & ~MLX4_FLAG_MASTER;
3191 }
3192 
3193 enum {
3194 	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
3195 };
3196 
3197 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
3198 			      int *nvfs)
3199 {
3200 	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
3201 	/* Checking for 64 VFs as a limitation of CX2 */
3202 	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
3203 	    requested_vfs >= 64) {
3204 		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3205 			 requested_vfs);
3206 		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
3207 	}
3208 	return 0;
3209 }
3210 
3211 static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3212 {
3213 	struct pci_dev *pdev = dev->persist->pdev;
3214 	int err = 0;
3215 
3216 	mutex_lock(&dev->persist->pci_status_mutex);
3217 	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3218 		err = pci_enable_device(pdev);
3219 		if (!err)
3220 			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3221 	}
3222 	mutex_unlock(&dev->persist->pci_status_mutex);
3223 
3224 	return err;
3225 }
3226 
3227 static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3228 {
3229 	struct pci_dev *pdev = dev->persist->pdev;
3230 
3231 	mutex_lock(&dev->persist->pci_status_mutex);
3232 	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3233 		pci_disable_device(pdev);
3234 		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3235 	}
3236 	mutex_unlock(&dev->persist->pci_status_mutex);
3237 }
3238 
3239 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3240 			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3241 			 int reset_flow)
3242 {
3243 	struct mlx4_dev *dev;
3244 	unsigned sum = 0;
3245 	int err;
3246 	int port;
3247 	int i;
3248 	struct mlx4_dev_cap *dev_cap = NULL;
3249 	int existing_vfs = 0;
3250 
3251 	dev = &priv->dev;
3252 
3253 	INIT_LIST_HEAD(&priv->ctx_list);
3254 	spin_lock_init(&priv->ctx_lock);
3255 
3256 	mutex_init(&priv->port_mutex);
3257 	mutex_init(&priv->bond_mutex);
3258 
3259 	INIT_LIST_HEAD(&priv->pgdir_list);
3260 	mutex_init(&priv->pgdir_mutex);
3261 	spin_lock_init(&priv->cmd.context_lock);
3262 
3263 	INIT_LIST_HEAD(&priv->bf_list);
3264 	mutex_init(&priv->bf_mutex);
3265 
3266 	dev->rev_id = pdev->revision;
3267 	dev->numa_node = dev_to_node(&pdev->dev);
3268 
3269 	/* Detect if this device is a virtual function */
3270 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3271 		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3272 		dev->flags |= MLX4_FLAG_SLAVE;
3273 	} else {
3274 		/* We reset the device and enable SRIOV only for physical
3275 		 * devices.  Try to claim ownership on the device;
3276 		 * if already taken, skip -- do not allow multiple PFs */
3277 		err = mlx4_get_ownership(dev);
3278 		if (err) {
3279 			if (err < 0)
3280 				return err;
3281 			else {
3282 				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3283 				return -EINVAL;
3284 			}
3285 		}
3286 
3287 		atomic_set(&priv->opreq_count, 0);
3288 		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3289 
3290 		/*
3291 		 * Now reset the HCA before we touch the PCI capabilities or
3292 		 * attempt a firmware command, since a boot ROM may have left
3293 		 * the HCA in an undefined state.
3294 		 */
3295 		err = mlx4_reset(dev);
3296 		if (err) {
3297 			mlx4_err(dev, "Failed to reset HCA, aborting\n");
3298 			goto err_sriov;
3299 		}
3300 
3301 		if (total_vfs) {
3302 			dev->flags = MLX4_FLAG_MASTER;
3303 			existing_vfs = pci_num_vf(pdev);
3304 			if (existing_vfs)
3305 				dev->flags |= MLX4_FLAG_SRIOV;
3306 			dev->persist->num_vfs = total_vfs;
3307 		}
3308 	}
3309 
	/* On load, remove any previous indication of internal error;
	 * the device is up.
	 */
3313 	dev->persist->state = MLX4_DEVICE_STATE_UP;
3314 
3315 slave_start:
3316 	err = mlx4_cmd_init(dev);
3317 	if (err) {
3318 		mlx4_err(dev, "Failed to init command interface, aborting\n");
3319 		goto err_sriov;
3320 	}
3321 
3322 	/* In slave functions, the communication channel must be initialized
3323 	 * before posting commands. Also, init num_slaves before calling
3324 	 * mlx4_init_hca */
3325 	if (mlx4_is_mfunc(dev)) {
3326 		if (mlx4_is_master(dev)) {
3327 			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3328 
3329 		} else {
3330 			dev->num_slaves = 0;
3331 			err = mlx4_multi_func_init(dev);
3332 			if (err) {
3333 				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3334 				goto err_cmd;
3335 			}
3336 		}
3337 	}
3338 
3339 	err = mlx4_init_fw(dev);
3340 	if (err) {
3341 		mlx4_err(dev, "Failed to init fw, aborting.\n");
3342 		goto err_mfunc;
3343 	}
3344 
3345 	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap is already initialized */
3347 		if (!dev_cap) {
3348 			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3349 
3350 			if (!dev_cap) {
3351 				err = -ENOMEM;
3352 				goto err_fw;
3353 			}
3354 
3355 			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3356 			if (err) {
3357 				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3358 				goto err_fw;
3359 			}
3360 
3361 			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3362 				goto err_fw;
3363 
3364 			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3365 				u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3366 								  total_vfs,
3367 								  existing_vfs,
3368 								  reset_flow);
3369 
3370 				mlx4_close_fw(dev);
3371 				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3372 				dev->flags = dev_flags;
3373 				if (!SRIOV_VALID_STATE(dev->flags)) {
3374 					mlx4_err(dev, "Invalid SRIOV state\n");
3375 					goto err_sriov;
3376 				}
3377 				err = mlx4_reset(dev);
3378 				if (err) {
3379 					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3380 					goto err_sriov;
3381 				}
3382 				goto slave_start;
3383 			}
3384 		} else {
3385 			/* Legacy mode FW requires SRIOV to be enabled before
3386 			 * doing QUERY_DEV_CAP, since max_eq's value is different if
3387 			 * SRIOV is enabled.
3388 			 */
3389 			memset(dev_cap, 0, sizeof(*dev_cap));
3390 			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3391 			if (err) {
3392 				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3393 				goto err_fw;
3394 			}
3395 
3396 			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3397 				goto err_fw;
3398 		}
3399 	}
3400 
3401 	err = mlx4_init_hca(dev);
3402 	if (err) {
3403 		if (err == -EACCES) {
3404 			/* Not primary Physical function
3405 			 * Running in slave mode */
3406 			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3407 			/* We're not a PF */
3408 			if (dev->flags & MLX4_FLAG_SRIOV) {
3409 				if (!existing_vfs)
3410 					pci_disable_sriov(pdev);
3411 				if (mlx4_is_master(dev) && !reset_flow)
3412 					atomic_dec(&pf_loading);
3413 				dev->flags &= ~MLX4_FLAG_SRIOV;
3414 			}
3415 			if (!mlx4_is_slave(dev))
3416 				mlx4_free_ownership(dev);
3417 			dev->flags |= MLX4_FLAG_SLAVE;
3418 			dev->flags &= ~MLX4_FLAG_MASTER;
3419 			goto slave_start;
3420 		} else
3421 			goto err_fw;
3422 	}
3423 
3424 	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3425 		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3426 						  existing_vfs, reset_flow);
3427 
3428 		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3429 			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3430 			dev->flags = dev_flags;
3431 			err = mlx4_cmd_init(dev);
3432 			if (err) {
3433 				/* Only VHCR is cleaned up, so could still
3434 				 * send FW commands
3435 				 */
3436 				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3437 				goto err_close;
3438 			}
3439 		} else {
3440 			dev->flags = dev_flags;
3441 		}
3442 
3443 		if (!SRIOV_VALID_STATE(dev->flags)) {
3444 			mlx4_err(dev, "Invalid SRIOV state\n");
3445 			goto err_close;
3446 		}
3447 	}
3448 
	/* Check whether the device is functioning at its maximum possible
	 * speed. There is no return code for this call; it just warns the
	 * user when the PCI express capabilities of the device are
	 * under-satisfied by the bus.
	 */
3453 	if (!mlx4_is_slave(dev))
3454 		mlx4_check_pcie_caps(dev);
3455 
3456 	/* In master functions, the communication channel must be initialized
3457 	 * after obtaining its address from fw */
3458 	if (mlx4_is_master(dev)) {
3459 		if (dev->caps.num_ports < 2 &&
3460 		    num_vfs_argc > 1) {
3461 			err = -EINVAL;
3462 			mlx4_err(dev,
3463 				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3464 				 dev->caps.num_ports);
3465 			goto err_close;
3466 		}
3467 		memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3468 
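		/* nvfs[0] and nvfs[1] hold the single-port VFs of port 1
		 * and port 2 respectively; nvfs[2] holds the dual-port VFs.
		 * Record each VF's port range accordingly.
		 */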
		for (i = 0; i < ARRAY_SIZE(dev->persist->nvfs); i++) {
3472 			unsigned j;
3473 
3474 			for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3475 				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3476 				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3477 					dev->caps.num_ports;
3478 			}
3479 		}
3480 
3481 		/* In master functions, the communication channel
3482 		 * must be initialized after obtaining its address from fw
3483 		 */
3484 		err = mlx4_multi_func_init(dev);
3485 		if (err) {
3486 			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3487 			goto err_close;
3488 		}
3489 	}
3490 
3491 	err = mlx4_alloc_eq_table(dev);
3492 	if (err)
3493 		goto err_master_mfunc;
3494 
3495 	bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
3496 	mutex_init(&priv->msix_ctl.pool_lock);
3497 
3498 	mlx4_enable_msi_x(dev);
	if (mlx4_is_mfunc(dev) && !(dev->flags & MLX4_FLAG_MSI_X)) {
3501 		err = -EOPNOTSUPP;
3502 		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3503 		goto err_free_eq;
3504 	}
3505 
3506 	if (!mlx4_is_slave(dev)) {
3507 		err = mlx4_init_steering(dev);
3508 		if (err)
3509 			goto err_disable_msix;
3510 	}
3511 
3512 	mlx4_init_quotas(dev);
3513 
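	/* If HCA setup fails with -EBUSY while MSI-X is enabled, retry once
	 * in INTx mode with a single completion vector.  Multi-function
	 * devices cannot take this fallback, since they require MSI-X.
	 */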
3514 	err = mlx4_setup_hca(dev);
3515 	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3516 	    !mlx4_is_mfunc(dev)) {
3517 		dev->flags &= ~MLX4_FLAG_MSI_X;
3518 		dev->caps.num_comp_vectors = 1;
3519 		pci_disable_msix(pdev);
3520 		err = mlx4_setup_hca(dev);
3521 	}
3522 
3523 	if (err)
3524 		goto err_steer;
3525 
	/* Once the PF's resources are ready, arm its comm channel so it
	 * can start receiving commands.
	 */
3529 	if (mlx4_is_master(dev)) {
3530 		err = mlx4_ARM_COMM_CHANNEL(dev);
3531 		if (err) {
			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
3533 				 err);
3534 			goto err_steer;
3535 		}
3536 	}
3537 
3538 	for (port = 1; port <= dev->caps.num_ports; port++) {
3539 		err = mlx4_init_port_info(dev, port);
3540 		if (err)
3541 			goto err_port;
3542 	}
3543 
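	/* Default virtual-to-physical (v2p) port mapping: identity */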
3544 	priv->v2p.port1 = 1;
3545 	priv->v2p.port2 = 2;
3546 
3547 	err = mlx4_register_device(dev);
3548 	if (err)
3549 		goto err_port;
3550 
3551 	mlx4_request_modules(dev);
3552 
3553 	mlx4_sense_init(dev);
3554 	mlx4_start_sense(dev);
3555 
3556 	priv->removed = 0;
3557 
3558 	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3559 		atomic_dec(&pf_loading);
3560 
3561 	kfree(dev_cap);
3562 	return 0;
3563 
3564 err_port:
3565 	for (--port; port >= 1; --port)
3566 		mlx4_cleanup_port_info(&priv->port[port]);
3567 
3568 	mlx4_cleanup_default_counters(dev);
3569 	if (!mlx4_is_slave(dev))
3570 		mlx4_cleanup_counters_table(dev);
3571 	mlx4_cleanup_qp_table(dev);
3572 	mlx4_cleanup_srq_table(dev);
3573 	mlx4_cleanup_cq_table(dev);
3574 	mlx4_cmd_use_polling(dev);
3575 	mlx4_cleanup_eq_table(dev);
3576 	mlx4_cleanup_mcg_table(dev);
3577 	mlx4_cleanup_mr_table(dev);
3578 	mlx4_cleanup_xrcd_table(dev);
3579 	mlx4_cleanup_pd_table(dev);
3580 	mlx4_cleanup_uar_table(dev);
3581 
3582 err_steer:
3583 	if (!mlx4_is_slave(dev))
3584 		mlx4_clear_steering(dev);
3585 
3586 err_disable_msix:
3587 	if (dev->flags & MLX4_FLAG_MSI_X)
3588 		pci_disable_msix(pdev);
3589 
3590 err_free_eq:
3591 	mlx4_free_eq_table(dev);
3592 
3593 err_master_mfunc:
3594 	if (mlx4_is_master(dev)) {
3595 		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3596 		mlx4_multi_func_cleanup(dev);
3597 	}
3598 
3599 	if (mlx4_is_slave(dev)) {
3600 		kfree(dev->caps.qp0_qkey);
3601 		kfree(dev->caps.qp0_tunnel);
3602 		kfree(dev->caps.qp0_proxy);
3603 		kfree(dev->caps.qp1_tunnel);
3604 		kfree(dev->caps.qp1_proxy);
3605 	}
3606 
3607 err_close:
3608 	mlx4_close_hca(dev);
3609 
3610 err_fw:
3611 	mlx4_close_fw(dev);
3612 
3613 err_mfunc:
3614 	if (mlx4_is_slave(dev))
3615 		mlx4_multi_func_cleanup(dev);
3616 
3617 err_cmd:
3618 	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3619 
3620 err_sriov:
3621 	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3622 		pci_disable_sriov(pdev);
3623 		dev->flags &= ~MLX4_FLAG_SRIOV;
3624 	}
3625 
3626 	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3627 		atomic_dec(&pf_loading);
3628 
3629 	kfree(priv->dev.dev_vfs);
3630 
3631 	if (!mlx4_is_slave(dev))
3632 		mlx4_free_ownership(dev);
3633 
3634 	kfree(dev_cap);
3635 	return err;
3636 }
3637 
3638 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3639 			   struct mlx4_priv *priv)
3640 {
3641 	int err;
3642 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3643 	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
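	/* param_map[argc - 1][pos] maps each module parameter position to
	 * an nvfs[]/prb_vf[] slot: a single argument configures dual-port
	 * VFs (slot 2), while two or three arguments map to port 1, port 2
	 * and both ports respectively.
	 */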
3644 	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3645 		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
3646 	unsigned total_vfs = 0;
3647 	unsigned int i;
3648 
3649 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3650 
3651 	err = mlx4_pci_enable_device(&priv->dev);
3652 	if (err) {
3653 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3654 		return err;
3655 	}
3656 
	/* Since all VFs and the PF are *guaranteed* 2 MACs per port, we
	 * must limit the number of VFs to 63 (there are 128 MACs in total).
	 */
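	/* Fold the num_vfs arguments into nvfs[] through param_map,
	 * accumulating the total VF count in the loop's step expression.
	 */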
	for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3663 		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3664 		if (nvfs[i] < 0) {
3665 			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3666 			err = -EINVAL;
3667 			goto err_disable_pdev;
3668 		}
3669 	}
	for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc; i++) {
3672 		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3673 		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3674 			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3675 			err = -EINVAL;
3676 			goto err_disable_pdev;
3677 		}
3678 	}
3679 	if (total_vfs > MLX4_MAX_NUM_VF) {
3680 		dev_err(&pdev->dev,
			"Requested more VFs (%d) than allowed by hw (%d)\n",
3682 			total_vfs, MLX4_MAX_NUM_VF);
3683 		err = -EINVAL;
3684 		goto err_disable_pdev;
3685 	}
3686 
3687 	for (i = 0; i < MLX4_MAX_PORTS; i++) {
3688 		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
3689 			dev_err(&pdev->dev,
				"Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n",
3691 				nvfs[i] + nvfs[2], i + 1,
3692 				MLX4_MAX_NUM_VF_P_PORT);
3693 			err = -EINVAL;
3694 			goto err_disable_pdev;
3695 		}
3696 	}
3697 
3698 	/* Check for BARs. */
3699 	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3700 	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3701 		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3702 			pci_dev_data, pci_resource_flags(pdev, 0));
3703 		err = -ENODEV;
3704 		goto err_disable_pdev;
3705 	}
3706 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3707 		dev_err(&pdev->dev, "Missing UAR, aborting\n");
3708 		err = -ENODEV;
3709 		goto err_disable_pdev;
3710 	}
3711 
3712 	err = pci_request_regions(pdev, DRV_NAME);
3713 	if (err) {
3714 		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3715 		goto err_disable_pdev;
3716 	}
3717 
3718 	pci_set_master(pdev);
3719 
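	/* Prefer 64-bit streaming and coherent DMA masks, falling back to
	 * 32-bit masks if the platform cannot provide them.
	 */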
3720 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3721 	if (err) {
		dev_warn(&pdev->dev, "Couldn't set 64-bit PCI DMA mask\n");
3723 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3724 		if (err) {
3725 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3726 			goto err_release_regions;
3727 		}
3728 	}
3729 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3730 	if (err) {
		dev_warn(&pdev->dev, "Couldn't set 64-bit consistent PCI DMA mask\n");
3732 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3733 		if (err) {
3734 			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
3735 			goto err_release_regions;
3736 		}
3737 	}
3738 
3739 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
3740 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3741 	/* Detect if this device is a virtual function */
3742 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as PF, we normally skip VF devices unless we
		 * were explicitly requested to probe them.
		 */
3746 		if (total_vfs) {
3747 			unsigned vfs_offset = 0;
3748 
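			/* Find which nvfs[] group this VF's extended
			 * function number falls into, then probe the VF only
			 * if it lies within that group's probe_vf count.
			 */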
			for (i = 0; i < ARRAY_SIZE(nvfs) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == ARRAY_SIZE(nvfs)) {
3754 				err = -ENODEV;
3755 				goto err_release_regions;
3756 			}
3757 			if ((extended_func_num(pdev) - vfs_offset)
3758 			    > prb_vf[i]) {
3759 				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
3760 					 extended_func_num(pdev));
3761 				err = -ENODEV;
3762 				goto err_release_regions;
3763 			}
3764 		}
3765 	}
3766 
3767 	err = mlx4_catas_init(&priv->dev);
3768 	if (err)
3769 		goto err_release_regions;
3770 
3771 	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3772 	if (err)
3773 		goto err_catas;
3774 
3775 	return 0;
3776 
3777 err_catas:
3778 	mlx4_catas_end(&priv->dev);
3779 
3780 err_release_regions:
3781 	pci_release_regions(pdev);
3782 
3783 err_disable_pdev:
3784 	mlx4_pci_disable_device(&priv->dev);
3785 	pci_set_drvdata(pdev, NULL);
3786 	return err;
3787 }
3788 
3789 static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
3790 				      enum devlink_port_type port_type)
3791 {
3792 	struct mlx4_port_info *info = container_of(devlink_port,
3793 						   struct mlx4_port_info,
3794 						   devlink_port);
3795 	enum mlx4_port_type mlx4_port_type;
3796 
3797 	switch (port_type) {
3798 	case DEVLINK_PORT_TYPE_AUTO:
3799 		mlx4_port_type = MLX4_PORT_TYPE_AUTO;
3800 		break;
3801 	case DEVLINK_PORT_TYPE_ETH:
3802 		mlx4_port_type = MLX4_PORT_TYPE_ETH;
3803 		break;
3804 	case DEVLINK_PORT_TYPE_IB:
3805 		mlx4_port_type = MLX4_PORT_TYPE_IB;
3806 		break;
3807 	default:
3808 		return -EOPNOTSUPP;
3809 	}
3810 
3811 	return __set_port_type(info, mlx4_port_type);
3812 }
3813 
3814 static const struct devlink_ops mlx4_devlink_ops = {
3815 	.port_type_set	= mlx4_devlink_port_type_set,
3816 };
3817 
3818 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3819 {
3820 	struct devlink *devlink;
3821 	struct mlx4_priv *priv;
3822 	struct mlx4_dev *dev;
3823 	int ret;
3824 
3825 	printk_once(KERN_INFO "%s", mlx4_version);
3826 
3827 	devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
3828 	if (!devlink)
3829 		return -ENOMEM;
3830 	priv = devlink_priv(devlink);
3831 
3832 	dev       = &priv->dev;
3833 	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3834 	if (!dev->persist) {
3835 		ret = -ENOMEM;
3836 		goto err_devlink_free;
3837 	}
3838 	dev->persist->pdev = pdev;
3839 	dev->persist->dev = dev;
3840 	pci_set_drvdata(pdev, dev->persist);
3841 	priv->pci_dev_data = id->driver_data;
3842 	mutex_init(&dev->persist->device_state_mutex);
3843 	mutex_init(&dev->persist->interface_state_mutex);
3844 	mutex_init(&dev->persist->pci_status_mutex);
3845 
3846 	ret = devlink_register(devlink, &pdev->dev);
3847 	if (ret)
3848 		goto err_persist_free;
3849 
	ret = __mlx4_init_one(pdev, id->driver_data, priv);
3851 	if (ret)
3852 		goto err_devlink_unregister;
3853 
3854 	pci_save_state(pdev);
3855 	return 0;
3856 
3857 err_devlink_unregister:
3858 	devlink_unregister(devlink);
3859 err_persist_free:
3860 	kfree(dev->persist);
3861 err_devlink_free:
3862 	devlink_free(devlink);
3863 	return ret;
3864 }
3865 
3866 static void mlx4_clean_dev(struct mlx4_dev *dev)
3867 {
3868 	struct mlx4_dev_persistent *persist = dev->persist;
3869 	struct mlx4_priv *priv = mlx4_priv(dev);
3870 	unsigned long	flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
3871 
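	/* Wipe the private structure but preserve the persistent state
	 * pointer and the flags that must survive a reset.
	 */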
3872 	memset(priv, 0, sizeof(*priv));
3873 	priv->dev.persist = persist;
3874 	priv->dev.flags = flags;
3875 }
3876 
3877 static void mlx4_unload_one(struct pci_dev *pdev)
3878 {
3879 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3880 	struct mlx4_dev  *dev  = persist->dev;
3881 	struct mlx4_priv *priv = mlx4_priv(dev);
3882 	int               pci_dev_data;
3883 	int p, i;
3884 
3885 	if (priv->removed)
3886 		return;
3887 
	/* Save the current port types so they can be restored later */
3889 	for (i = 0; i < dev->caps.num_ports; i++) {
3890 		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] =
			dev->caps.possible_type[i + 1];
3893 	}
3894 
3895 	pci_dev_data = priv->pci_dev_data;
3896 
3897 	mlx4_stop_sense(dev);
3898 	mlx4_unregister_device(dev);
3899 
3900 	for (p = 1; p <= dev->caps.num_ports; p++) {
3901 		mlx4_cleanup_port_info(&priv->port[p]);
3902 		mlx4_CLOSE_PORT(dev, p);
3903 	}
3904 
3905 	if (mlx4_is_master(dev))
3906 		mlx4_free_resource_tracker(dev,
3907 					   RES_TR_FREE_SLAVES_ONLY);
3908 
3909 	mlx4_cleanup_default_counters(dev);
3910 	if (!mlx4_is_slave(dev))
3911 		mlx4_cleanup_counters_table(dev);
3912 	mlx4_cleanup_qp_table(dev);
3913 	mlx4_cleanup_srq_table(dev);
3914 	mlx4_cleanup_cq_table(dev);
3915 	mlx4_cmd_use_polling(dev);
3916 	mlx4_cleanup_eq_table(dev);
3917 	mlx4_cleanup_mcg_table(dev);
3918 	mlx4_cleanup_mr_table(dev);
3919 	mlx4_cleanup_xrcd_table(dev);
3920 	mlx4_cleanup_pd_table(dev);
3921 
3922 	if (mlx4_is_master(dev))
3923 		mlx4_free_resource_tracker(dev,
3924 					   RES_TR_FREE_STRUCTS_ONLY);
3925 
3926 	iounmap(priv->kar);
3927 	mlx4_uar_free(dev, &priv->driver_uar);
3928 	mlx4_cleanup_uar_table(dev);
3929 	if (!mlx4_is_slave(dev))
3930 		mlx4_clear_steering(dev);
3931 	mlx4_free_eq_table(dev);
3932 	if (mlx4_is_master(dev))
3933 		mlx4_multi_func_cleanup(dev);
3934 	mlx4_close_hca(dev);
3935 	mlx4_close_fw(dev);
3936 	if (mlx4_is_slave(dev))
3937 		mlx4_multi_func_cleanup(dev);
3938 	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3939 
3940 	if (dev->flags & MLX4_FLAG_MSI_X)
3941 		pci_disable_msix(pdev);
3942 
3943 	if (!mlx4_is_slave(dev))
3944 		mlx4_free_ownership(dev);
3945 
3946 	kfree(dev->caps.qp0_qkey);
3947 	kfree(dev->caps.qp0_tunnel);
3948 	kfree(dev->caps.qp0_proxy);
3949 	kfree(dev->caps.qp1_tunnel);
3950 	kfree(dev->caps.qp1_proxy);
3951 	kfree(dev->dev_vfs);
3952 
3953 	mlx4_clean_dev(dev);
3954 	priv->pci_dev_data = pci_dev_data;
3955 	priv->removed = 1;
3956 }
3957 
3958 static void mlx4_remove_one(struct pci_dev *pdev)
3959 {
3960 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3961 	struct mlx4_dev  *dev  = persist->dev;
3962 	struct mlx4_priv *priv = mlx4_priv(dev);
3963 	struct devlink *devlink = priv_to_devlink(priv);
3964 	int active_vfs = 0;
3965 
3966 	if (mlx4_is_slave(dev))
3967 		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
3968 
3969 	mutex_lock(&persist->interface_state_mutex);
3970 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3971 	mutex_unlock(&persist->interface_state_mutex);
3972 
	/* Disabling SR-IOV is not allowed while there are active VFs */
3974 	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3975 		active_vfs = mlx4_how_many_lives_vf(dev);
3976 		if (active_vfs) {
			pr_warn("Removing PF while there are active VFs!\n");
3978 			pr_warn("Will not disable SR-IOV.\n");
3979 		}
3980 	}
3981 
	/* The device is now marked for deletion; proceed without the lock
	 * so that other tasks can terminate.
	 */
3985 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3986 		mlx4_unload_one(pdev);
3987 	else
3988 		mlx4_info(dev, "%s: interface is down\n", __func__);
3989 	mlx4_catas_end(dev);
3990 	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3991 		mlx4_warn(dev, "Disabling SR-IOV\n");
3992 		pci_disable_sriov(pdev);
3993 	}
3994 
3995 	pci_release_regions(pdev);
3996 	mlx4_pci_disable_device(dev);
3997 	devlink_unregister(devlink);
3998 	kfree(dev->persist);
3999 	devlink_free(devlink);
4000 	pci_set_drvdata(pdev, NULL);
4001 }
4002 
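/* Reapply the port types saved by mlx4_unload_one() so that a restarted
 * or recovered device comes back with its previous port configuration.
 */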
4003 static int restore_current_port_types(struct mlx4_dev *dev,
4004 				      enum mlx4_port_type *types,
4005 				      enum mlx4_port_type *poss_types)
4006 {
4007 	struct mlx4_priv *priv = mlx4_priv(dev);
4008 	int err, i;
4009 
4010 	mlx4_stop_sense(dev);
4011 
4012 	mutex_lock(&priv->port_mutex);
4013 	for (i = 0; i < dev->caps.num_ports; i++)
4014 		dev->caps.possible_type[i + 1] = poss_types[i];
4015 	err = mlx4_change_port_types(dev, types);
4016 	mlx4_start_sense(dev);
4017 	mutex_unlock(&priv->port_mutex);
4018 
4019 	return err;
4020 }
4021 
4022 int mlx4_restart_one(struct pci_dev *pdev)
4023 {
4024 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4025 	struct mlx4_dev	 *dev  = persist->dev;
4026 	struct mlx4_priv *priv = mlx4_priv(dev);
4027 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4028 	int pci_dev_data, err, total_vfs;
4029 
4030 	pci_dev_data = priv->pci_dev_data;
4031 	total_vfs = dev->persist->num_vfs;
4032 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4033 
4034 	mlx4_unload_one(pdev);
4035 	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
4036 	if (err) {
4037 		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
4038 			 __func__, pci_name(pdev), err);
4039 		return err;
4040 	}
4041 
4042 	err = restore_current_port_types(dev, dev->persist->curr_port_type,
4043 					 dev->persist->curr_port_poss_type);
4044 	if (err)
4045 		mlx4_err(dev, "could not restore original port types (%d)\n",
4046 			 err);
4047 
4048 	return err;
4049 }
4050 
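/* PCI table helpers: MLX_SP forces port sensing on the device, MLX_VF
 * marks a virtual function, and MLX_GN adds a device with no special
 * driver_data flags.
 */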
4051 #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
4052 #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
4053 #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
4054 
4055 static const struct pci_device_id mlx4_pci_table[] = {
4056 	/* MT25408 "Hermon" */
4057 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR),	/* SDR */
4058 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR),	/* DDR */
4059 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR),	/* QDR */
4060 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
4061 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2),	/* QDR Gen2 */
4062 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN),	/* EN 10GigE */
4063 	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2),  /* EN 10GigE Gen2 */
4064 	/* MT25458 ConnectX EN 10GBASE-T */
4065 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
4066 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2),	/* Gen2 */
4067 	/* MT26468 ConnectX EN 10GigE PCIe Gen2*/
4068 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
4069 	/* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
4070 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
4071 	/* MT26478 ConnectX2 40GigE PCIe Gen2 */
4072 	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
4073 	/* MT25400 Family [ConnectX-2] */
4074 	MLX_VF(0x1002),					/* Virtual Function */
4075 	/* MT27500 Family [ConnectX-3] */
4076 	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
4077 	MLX_VF(0x1004),					/* Virtual Function */
4078 	MLX_GN(0x1005),					/* MT27510 Family */
4079 	MLX_GN(0x1006),					/* MT27511 Family */
4080 	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO),	/* MT27520 Family */
4081 	MLX_GN(0x1008),					/* MT27521 Family */
4082 	MLX_GN(0x1009),					/* MT27530 Family */
4083 	MLX_GN(0x100a),					/* MT27531 Family */
4084 	MLX_GN(0x100b),					/* MT27540 Family */
4085 	MLX_GN(0x100c),					/* MT27541 Family */
4086 	MLX_GN(0x100d),					/* MT27550 Family */
4087 	MLX_GN(0x100e),					/* MT27551 Family */
4088 	MLX_GN(0x100f),					/* MT27560 Family */
4089 	MLX_GN(0x1010),					/* MT27561 Family */
4090 
4091 	/*
4092 	 * See the mellanox_check_broken_intx_masking() quirk when
4093 	 * adding devices
4094 	 */
4095 
4096 	{ 0, }
4097 };
4098 
4099 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
4100 
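/* PCI error recovery: put the device into its error state and unload it.
 * A permanently failed channel is disconnected; otherwise a slot reset is
 * requested so mlx4_pci_slot_reset() and mlx4_pci_resume() can reload the
 * device.
 */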
4101 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4102 					      pci_channel_state_t state)
4103 {
4104 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4105 
4106 	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
4107 	mlx4_enter_error_state(persist);
4108 
4109 	mutex_lock(&persist->interface_state_mutex);
4110 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4111 		mlx4_unload_one(pdev);
4112 
4113 	mutex_unlock(&persist->interface_state_mutex);
4114 	if (state == pci_channel_io_perm_failure)
4115 		return PCI_ERS_RESULT_DISCONNECT;
4116 
4117 	mlx4_pci_disable_device(persist->dev);
4118 	return PCI_ERS_RESULT_NEED_RESET;
4119 }
4120 
4121 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4122 {
4123 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4124 	struct mlx4_dev	 *dev  = persist->dev;
4125 	int err;
4126 
4127 	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4128 	err = mlx4_pci_enable_device(dev);
4129 	if (err) {
		mlx4_err(dev, "Cannot re-enable device, err=%d\n", err);
4131 		return PCI_ERS_RESULT_DISCONNECT;
4132 	}
4133 
4134 	pci_set_master(pdev);
4135 	pci_restore_state(pdev);
4136 	pci_save_state(pdev);
4137 	return PCI_ERS_RESULT_RECOVERED;
4138 }
4139 
4140 static void mlx4_pci_resume(struct pci_dev *pdev)
4141 {
4142 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4143 	struct mlx4_dev	 *dev  = persist->dev;
4144 	struct mlx4_priv *priv = mlx4_priv(dev);
4145 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4146 	int total_vfs;
4147 	int err;
4148 
4149 	mlx4_err(dev, "%s was called\n", __func__);
4150 	total_vfs = dev->persist->num_vfs;
4151 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4152 
4153 	mutex_lock(&persist->interface_state_mutex);
4154 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4155 		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4156 				    priv, 1);
4157 		if (err) {
4158 			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4159 				 __func__,  err);
4160 			goto end;
4161 		}
4162 
		err = restore_current_port_types(dev,
						 dev->persist->curr_port_type,
						 dev->persist->curr_port_poss_type);
4166 		if (err)
4167 			mlx4_err(dev, "could not restore original port types (%d)\n", err);
4168 	}
4169 end:
4170 	mutex_unlock(&persist->interface_state_mutex);
4172 }
4173 
4174 static void mlx4_shutdown(struct pci_dev *pdev)
4175 {
4176 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4177 
4178 	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4179 	mutex_lock(&persist->interface_state_mutex);
4180 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4181 		mlx4_unload_one(pdev);
4182 	mutex_unlock(&persist->interface_state_mutex);
4183 }
4184 
4185 static const struct pci_error_handlers mlx4_err_handler = {
4186 	.error_detected = mlx4_pci_err_detected,
4187 	.slot_reset     = mlx4_pci_slot_reset,
4188 	.resume		= mlx4_pci_resume,
4189 };
4190 
4191 static struct pci_driver mlx4_driver = {
4192 	.name		= DRV_NAME,
4193 	.id_table	= mlx4_pci_table,
4194 	.probe		= mlx4_init_one,
4195 	.shutdown	= mlx4_shutdown,
4196 	.remove		= mlx4_remove_one,
4197 	.err_handler    = &mlx4_err_handler,
4198 };
4199 
4200 static int __init mlx4_verify_params(void)
4201 {
4202 	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
4204 		return -1;
4205 	}
4206 
4207 	if (log_num_vlan != 0)
4208 		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
4209 			MLX4_LOG_NUM_VLANS);
4210 
4211 	if (use_prio != 0)
4212 		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
4213 
4214 	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
4215 		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
4216 			log_mtts_per_seg);
4217 		return -1;
4218 	}
4219 
	/* Check that the port type module parameter has a legal combination */
	if (!port_type_array[0] && port_type_array[1]) {
4222 		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
4223 		port_type_array[0] = true;
4224 	}
4225 
4226 	if (mlx4_log_num_mgm_entry_size < -7 ||
4227 	    (mlx4_log_num_mgm_entry_size > 0 &&
4228 	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
4229 	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
4230 		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
4231 			mlx4_log_num_mgm_entry_size,
4232 			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
4233 			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
4234 		return -1;
4235 	}
4236 
4237 	return 0;
4238 }
4239 
4240 static int __init mlx4_init(void)
4241 {
4242 	int ret;
4243 
4244 	if (mlx4_verify_params())
4245 		return -EINVAL;
4246 
4248 	mlx4_wq = create_singlethread_workqueue("mlx4");
4249 	if (!mlx4_wq)
4250 		return -ENOMEM;
4251 
4252 	ret = pci_register_driver(&mlx4_driver);
4253 	if (ret < 0)
4254 		destroy_workqueue(mlx4_wq);
4255 	return ret < 0 ? ret : 0;
4256 }
4257 
4258 static void __exit mlx4_cleanup(void)
4259 {
4260 	pci_unregister_driver(&mlx4_driver);
4261 	destroy_workqueue(mlx4_wq);
4262 }
4263 
4264 module_init(mlx4_init);
4265 module_exit(mlx4_cleanup);
4266