1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/io-mapping.h>
43 #include <linux/delay.h>
44 #include <linux/kmod.h>
45 
46 #include <linux/mlx4/device.h>
47 #include <linux/mlx4/doorbell.h>
48 
49 #include "mlx4.h"
50 #include "fw.h"
51 #include "icm.h"
52 
53 MODULE_AUTHOR("Roland Dreier");
54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
55 MODULE_LICENSE("Dual BSD/GPL");
56 MODULE_VERSION(DRV_VERSION);
57 
58 struct workqueue_struct *mlx4_wq;
59 
60 #ifdef CONFIG_MLX4_DEBUG
61 
62 int mlx4_debug_level = 0;
63 module_param_named(debug_level, mlx4_debug_level, int, 0644);
64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
65 
66 #endif /* CONFIG_MLX4_DEBUG */
67 
68 #ifdef CONFIG_PCI_MSI
69 
70 static int msi_x = 1;
71 module_param(msi_x, int, 0444);
72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
73 
74 #else /* CONFIG_PCI_MSI */
75 
76 #define msi_x (0)
77 
78 #endif /* CONFIG_PCI_MSI */
79 
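/* num_vfs[0] counts VFs on port 1 only, num_vfs[1] VFs on port 2 only and
 * num_vfs[2] dual-port VFs, matching the "port1,port2,port1+2" format below.
 */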
80 static uint8_t num_vfs[3] = {0, 0, 0};
81 static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
84 			  "num_vfs=port1,port2,port1+2");
85 
86 static uint8_t probe_vf[3] = {0, 0, 0};
87 static int probe_vfs_argc;
88 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of VFs to probe by the PF driver (num_vfs > 0)\n"
90 			   "probe_vf=port1,port2,port1+2");
91 
92 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
93 module_param_named(log_num_mgm_entry_size,
94 			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
101 
102 static bool enable_64b_cqe_eqe = true;
103 module_param(enable_64b_cqe_eqe, bool, 0444);
104 MODULE_PARM_DESC(enable_64b_cqe_eqe,
105 		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
106 
107 #define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
108 					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
109 					 MLX4_FUNC_CAP_DMFS_A0_STATIC)
110 
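/* Device flags that are preserved across an HCA reset (SR-IOV stays set up) */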
111 #define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)
112 
113 static char mlx4_version[] =
114 	DRV_NAME ": Mellanox ConnectX core driver v"
115 	DRV_VERSION " (" DRV_RELDATE ")\n";
116 
117 static struct mlx4_profile default_profile = {
118 	.num_qp		= 1 << 18,
119 	.num_srq	= 1 << 16,
120 	.rdmarc_per_qp	= 1 << 4,
121 	.num_cq		= 1 << 16,
122 	.num_mcg	= 1 << 13,
123 	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
125 };
126 
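/* Smaller resource profile intended for low-memory systems */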
127 static struct mlx4_profile low_mem_profile = {
128 	.num_qp		= 1 << 17,
129 	.num_srq	= 1 << 6,
130 	.rdmarc_per_qp	= 1 << 4,
131 	.num_cq		= 1 << 8,
132 	.num_mcg	= 1 << 8,
133 	.num_mpt	= 1 << 9,
134 	.num_mtt	= 1 << 7,
135 };
136 
137 static int log_num_mac = 7;
138 module_param_named(log_num_mac, log_num_mac, int, 0444);
139 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
140 
141 static int log_num_vlan;
142 module_param_named(log_num_vlan, log_num_vlan, int, 0444);
143 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
144 /* Log2 max number of VLANs per ETH port (0-7) */
145 #define MLX4_LOG_NUM_VLANS 7
146 #define MLX4_MIN_LOG_NUM_VLANS 0
147 #define MLX4_MIN_LOG_NUM_MAC 1
148 
149 static bool use_prio;
150 module_param_named(use_prio, use_prio, bool, 0444);
151 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
152 
153 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
154 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
155 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
156 
157 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
158 static int arr_argc = 2;
159 module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				"1 for IB, 2 for Ethernet");
162 
163 struct mlx4_port_config {
164 	struct list_head list;
165 	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
166 	struct pci_dev *pdev;
167 };
168 
169 static atomic_t pf_loading = ATOMIC_INIT(0);
170 
171 int mlx4_check_port_params(struct mlx4_dev *dev,
172 			   enum mlx4_port_type *port_type)
173 {
174 	int i;
175 
176 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
177 		for (i = 0; i < dev->caps.num_ports - 1; i++) {
178 			if (port_type[i] != port_type[i + 1]) {
179 				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
180 				return -EINVAL;
181 			}
182 		}
183 	}
184 
185 	for (i = 0; i < dev->caps.num_ports; i++) {
186 		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
187 			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
188 				 i + 1);
189 			return -EINVAL;
190 		}
191 	}
192 	return 0;
193 }
194 
195 static void mlx4_set_port_mask(struct mlx4_dev *dev)
196 {
197 	int i;
198 
199 	for (i = 1; i <= dev->caps.num_ports; ++i)
200 		dev->caps.port_mask[i] = dev->caps.port_type[i];
201 }
202 
203 enum {
204 	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
205 };
206 
207 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
208 {
209 	int err = 0;
210 	struct mlx4_func func;
211 
212 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
213 		err = mlx4_QUERY_FUNC(dev, &func, 0);
214 		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
216 			return err;
217 		}
218 		dev_cap->max_eqs = func.max_eq;
219 		dev_cap->reserved_eqs = func.rsvd_eqs;
220 		dev_cap->reserved_uars = func.rsvd_uars;
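		/* Flag to the caller that the EQ/UAR values above were
		 * refreshed from QUERY_FUNC.
		 */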
221 		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
222 	}
223 	return err;
224 }
225 
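/* CQE/EQE stride (used on systems with 128B/256B cache lines) spaces the
 * entries a full cache line apart; only 32B of each entry holds real data.
 */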
226 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
227 {
228 	struct mlx4_caps *dev_cap = &dev->caps;
229 
	/* Not supported by FW or disabled by the user */
231 	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
232 	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
233 		return;
234 
	/* Must have 64B CQEs/EQEs enabled by FW to use the bigger stride.
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs.
	 */
238 	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
239 	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
240 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
241 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
242 		return;
243 	}
244 
245 	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride, cache line supported\n");
247 		/* Changing the real data inside CQE size to 32B */
248 		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
249 		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
250 
251 		if (mlx4_is_master(dev))
252 			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
253 	} else {
254 		if (cache_line_size() != 32  && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cache line size unsupported\n");
256 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
257 		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
258 	}
259 }
260 
261 static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
262 			  struct mlx4_port_cap *port_cap)
263 {
264 	dev->caps.vl_cap[port]	    = port_cap->max_vl;
265 	dev->caps.ib_mtu_cap[port]	    = port_cap->ib_mtu;
266 	dev->phys_caps.gid_phys_table_len[port]  = port_cap->max_gids;
267 	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
268 	/* set gid and pkey table operating lengths by default
269 	 * to non-sriov values
270 	 */
271 	dev->caps.gid_table_len[port]  = port_cap->max_gids;
272 	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
273 	dev->caps.port_width_cap[port] = port_cap->max_port_width;
274 	dev->caps.eth_mtu_cap[port]    = port_cap->eth_mtu;
275 	dev->caps.def_mac[port]        = port_cap->def_mac;
276 	dev->caps.supported_type[port] = port_cap->supported_port_types;
277 	dev->caps.suggested_type[port] = port_cap->suggested_type;
278 	dev->caps.default_sense[port] = port_cap->default_sense;
279 	dev->caps.trans_type[port]	    = port_cap->trans_type;
280 	dev->caps.vendor_oui[port]     = port_cap->vendor_oui;
281 	dev->caps.wavelength[port]     = port_cap->wavelength;
282 	dev->caps.trans_code[port]     = port_cap->trans_code;
283 
284 	return 0;
285 }
286 
287 static int mlx4_dev_port(struct mlx4_dev *dev, int port,
288 			 struct mlx4_port_cap *port_cap)
289 {
290 	int err = 0;
291 
292 	err = mlx4_QUERY_PORT(dev, port, port_cap);
293 
294 	if (err)
295 		mlx4_err(dev, "QUERY_PORT command failed.\n");
296 
297 	return err;
298 }
299 
300 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
301 {
302 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
303 		return;
304 
305 	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS\n");
307 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
308 		return;
309 	}
310 
311 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
312 		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS\n");
314 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
315 		return;
316 	}
317 }
318 
319 #define MLX4_A0_STEERING_TABLE_SIZE	256
320 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
321 {
322 	int err;
323 	int i;
324 
325 	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
326 	if (err) {
327 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
328 		return err;
329 	}
330 	mlx4_dev_cap_dump(dev, dev_cap);
331 
332 	if (dev_cap->min_page_sz > PAGE_SIZE) {
333 		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
334 			 dev_cap->min_page_sz, PAGE_SIZE);
335 		return -ENODEV;
336 	}
337 	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
338 		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
339 			 dev_cap->num_ports, MLX4_MAX_PORTS);
340 		return -ENODEV;
341 	}
342 
343 	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
344 		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
345 			 dev_cap->uar_size,
346 			 (unsigned long long)
347 			 pci_resource_len(dev->persist->pdev, 2));
348 		return -ENODEV;
349 	}
350 
351 	dev->caps.num_ports	     = dev_cap->num_ports;
352 	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
353 	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
354 				      dev->caps.num_sys_eqs :
355 				      MLX4_MAX_EQ_NUM;
356 	for (i = 1; i <= dev->caps.num_ports; ++i) {
357 		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
358 		if (err) {
359 			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
360 			return err;
361 		}
362 	}
363 
364 	dev->caps.uar_page_size	     = PAGE_SIZE;
365 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
366 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
367 	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
368 	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
369 	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
370 	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
371 	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
372 	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
373 	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
374 	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
375 	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
376 	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
377 	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
378 	/*
379 	 * Subtract 1 from the limit because we need to allocate a
380 	 * spare CQE so the HCA HW can tell the difference between an
381 	 * empty CQ and a full CQ.
382 	 */
383 	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
384 	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
385 	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
386 	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
387 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
388 
389 	/* The first 128 UARs are used for EQ doorbells */
390 	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
391 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
392 	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
393 					dev_cap->reserved_xrcds : 0;
394 	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
395 					dev_cap->max_xrcds : 0;
396 	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;
397 
398 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
399 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
400 	dev->caps.flags		     = dev_cap->flags;
401 	dev->caps.flags2	     = dev_cap->flags2;
402 	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
403 	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
404 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
405 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
406 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
407 
408 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
409 		struct mlx4_init_hca_param hca_param;
410 
411 		memset(&hca_param, 0, sizeof(hca_param));
412 		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off the PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and verifies
		 * that the phv bit was reported correctly in the wqe. To allow QinQ,
		 * the PHV_EN flag should be set and phv_check_en must be cleared,
		 * otherwise QinQ packets will be dropped by the HW.
		 */
419 		if (err || hca_param.phv_check_en)
420 			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
421 	}
422 
423 	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
424 	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
425 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
426 	/* Don't do sense port on multifunction devices (for now at least) */
427 	if (mlx4_is_mfunc(dev))
428 		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
429 
430 	if (mlx4_low_memory_profile()) {
431 		dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
432 		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
433 	} else {
434 		dev->caps.log_num_macs  = log_num_mac;
435 		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
436 	}
437 
438 	for (i = 1; i <= dev->caps.num_ports; ++i) {
439 		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
440 		if (dev->caps.supported_type[i]) {
441 			/* if only ETH is supported - assign ETH */
442 			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
443 				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
444 			/* if only IB is supported, assign IB */
445 			else if (dev->caps.supported_type[i] ==
446 				 MLX4_PORT_TYPE_IB)
447 				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
448 			else {
449 				/* if IB and ETH are supported, we set the port
450 				 * type according to user selection of port type;
451 				 * if user selected none, take the FW hint */
452 				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
453 					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
454 						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
455 				else
456 					dev->caps.port_type[i] = port_type_array[i - 1];
457 			}
458 		}
459 		/*
460 		 * Link sensing is allowed on the port if 3 conditions are true:
461 		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
464 		 */
465 		mlx4_priv(dev)->sense.sense_allowed[i] =
466 			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
467 			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
468 			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
469 
470 		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform a sense_port FW command to try and set the correct
		 * port type from the beginning
474 		 */
475 		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;

			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
478 			mlx4_SENSE_PORT(dev, i, &sensed_port);
479 			if (sensed_port != MLX4_PORT_TYPE_NONE)
480 				dev->caps.port_type[i] = sensed_port;
481 		} else {
482 			dev->caps.possible_type[i] = dev->caps.port_type[i];
483 		}
484 
485 		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
486 			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too high for port %d, reducing to %d\n",
488 				  i, 1 << dev->caps.log_num_macs);
489 		}
490 		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
491 			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too high for port %d, reducing to %d\n",
493 				  i, 1 << dev->caps.log_num_vlans);
494 		}
495 	}
496 
497 	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
498 	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
499 	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
500 		mlx4_warn(dev,
501 			  "Granular QoS per VF not supported with IB/Eth configuration\n");
502 		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
503 	}
504 
505 	dev->caps.max_counters = dev_cap->max_counters;
506 
507 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
508 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
509 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
510 		(1 << dev->caps.log_num_macs) *
511 		(1 << dev->caps.log_num_vlans) *
512 		dev->caps.num_ports;
513 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
514 
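	/* Use the dedicated high-rate (A0) steering QP range reported by FW
	 * when flow steering is enabled; otherwise fall back to the
	 * FW-reserved QP region.
	 */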
515 	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
516 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
517 		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
518 	else
519 		dev->caps.dmfs_high_rate_qpn_base =
520 			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
521 
522 	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
523 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
524 		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
525 		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
526 		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
527 	} else {
528 		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
529 		dev->caps.dmfs_high_rate_qpn_base =
530 			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
531 		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
532 	}
533 
534 	dev->caps.rl_caps = dev_cap->rl_caps;
535 
536 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
537 		dev->caps.dmfs_high_rate_qpn_range;
538 
539 	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
540 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
541 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
542 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
543 
544 	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
545 
546 	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
547 		if (dev_cap->flags &
548 		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
549 			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
550 			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
551 			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
552 		}
553 
554 		if (dev_cap->flags2 &
555 		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
556 		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
557 			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
558 			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
559 			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
560 		}
561 	}
562 
563 	if ((dev->caps.flags &
564 	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
565 	    mlx4_is_master(dev))
566 		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
567 
568 	if (!mlx4_is_slave(dev)) {
569 		mlx4_enable_cqe_eqe_stride(dev);
570 		dev->caps.alloc_res_qp_mask =
571 			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
572 			MLX4_RESERVE_A0_QP;
573 
574 		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
575 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
576 			mlx4_warn(dev, "Old device ETS support detected\n");
577 			mlx4_warn(dev, "Consider upgrading device FW.\n");
578 			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}
	} else {
582 		dev->caps.alloc_res_qp_mask = 0;
583 	}
584 
585 	mlx4_enable_ignore_fcs(dev);
586 
587 	return 0;
588 }
589 
590 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
591 				       enum pci_bus_speed *speed,
592 				       enum pcie_link_width *width)
593 {
594 	u32 lnkcap1, lnkcap2;
595 	int err1, err2;
596 
597 #define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */
598 
599 	*speed = PCI_SPEED_UNKNOWN;
600 	*width = PCIE_LNK_WIDTH_UNKNOWN;
601 
602 	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
603 					  &lnkcap1);
604 	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
605 					  &lnkcap2);
606 	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
607 		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
608 			*speed = PCIE_SPEED_8_0GT;
609 		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
610 			*speed = PCIE_SPEED_5_0GT;
611 		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
612 			*speed = PCIE_SPEED_2_5GT;
613 	}
614 	if (!err1) {
615 		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
616 		if (!lnkcap2) { /* pre-r3.0 */
617 			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
618 				*speed = PCIE_SPEED_5_0GT;
619 			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
620 				*speed = PCIE_SPEED_2_5GT;
621 		}
622 	}
623 
624 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
625 		return err1 ? err1 :
626 			err2 ? err2 : -EINVAL;
627 	}
628 	return 0;
629 }
630 
631 static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
632 {
633 	enum pcie_link_width width, width_cap;
634 	enum pci_bus_speed speed, speed_cap;
635 	int err;
636 
637 #define PCIE_SPEED_STR(speed) \
638 	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
639 	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
640 	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
641 	 "Unknown")
642 
643 	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
644 	if (err) {
645 		mlx4_warn(dev,
646 			  "Unable to determine PCIe device BW capabilities\n");
647 		return;
648 	}
649 
650 	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
651 	if (err || speed == PCI_SPEED_UNKNOWN ||
652 	    width == PCIE_LNK_WIDTH_UNKNOWN) {
653 		mlx4_warn(dev,
654 			  "Unable to determine PCI device chain minimum BW\n");
655 		return;
656 	}
657 
658 	if (width != width_cap || speed != speed_cap)
659 		mlx4_warn(dev,
660 			  "PCIe BW is different than device's capability\n");
661 
662 	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
663 		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
664 	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
665 		  width, width_cap);
667 }
668 
/* Check whether there are live VFs and return how many there are */
670 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
671 {
672 	struct mlx4_priv *priv = mlx4_priv(dev);
673 	struct mlx4_slave_state *s_state;
674 	int i;
675 	int ret = 0;
676 
	for (i = 1 /* the PPF is 0 */; i < dev->num_slaves; ++i) {
678 		s_state = &priv->mfunc.master.slave_state[i];
679 		if (s_state->active && s_state->last_cmd !=
680 		    MLX4_COMM_CMD_RESET) {
681 			mlx4_warn(dev, "%s: slave: %d is still active\n",
682 				  __func__, i);
683 			ret++;
684 		}
685 	}
686 	return ret;
687 }
688 
689 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
690 {
691 	u32 qk = MLX4_RESERVED_QKEY_BASE;
692 
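	/* Reserved qkeys are laid out with the proxy SQP range first,
	 * immediately followed by the tunnel SQP range.
	 */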
693 	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
694 	    qpn < dev->phys_caps.base_proxy_sqpn)
695 		return -EINVAL;
696 
697 	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
698 		/* tunnel qp */
699 		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
700 	else
701 		qk += qpn - dev->phys_caps.base_proxy_sqpn;
702 	*qkey = qk;
703 	return 0;
704 }
705 EXPORT_SYMBOL(mlx4_get_parav_qkey);
706 
707 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
708 {
709 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
710 
711 	if (!mlx4_is_master(dev))
712 		return;
713 
714 	priv->virt2phys_pkey[slave][port - 1][i] = val;
715 }
716 EXPORT_SYMBOL(mlx4_sync_pkey_table);
717 
718 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
719 {
720 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
721 
722 	if (!mlx4_is_master(dev))
723 		return;
724 
725 	priv->slave_node_guids[slave] = guid;
726 }
727 EXPORT_SYMBOL(mlx4_put_slave_node_guid);
728 
729 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
730 {
731 	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
732 
733 	if (!mlx4_is_master(dev))
734 		return 0;
735 
736 	return priv->slave_node_guids[slave];
737 }
738 EXPORT_SYMBOL(mlx4_get_slave_node_guid);
739 
740 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
741 {
742 	struct mlx4_priv *priv = mlx4_priv(dev);
743 	struct mlx4_slave_state *s_slave;
744 
745 	if (!mlx4_is_master(dev))
746 		return 0;
747 
748 	s_slave = &priv->mfunc.master.slave_state[slave];
749 	return !!s_slave->active;
750 }
751 EXPORT_SYMBOL(mlx4_is_slave_active);
752 
753 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
754 				       struct mlx4_dev_cap *dev_cap,
755 				       struct mlx4_init_hca_param *hca_param)
756 {
757 	dev->caps.steering_mode = hca_param->steering_mode;
758 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
759 		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
760 		dev->caps.fs_log_max_ucast_qp_range_size =
761 			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz) / 16 - 2);
	}
765 
766 	mlx4_dbg(dev, "Steering mode is: %s\n",
767 		 mlx4_steering_mode_str(dev->caps.steering_mode));
768 }
769 
770 static int mlx4_slave_cap(struct mlx4_dev *dev)
771 {
772 	int			   err;
773 	u32			   page_size;
774 	struct mlx4_dev_cap	   dev_cap;
775 	struct mlx4_func_cap	   func_cap;
776 	struct mlx4_init_hca_param hca_param;
777 	u8			   i;
778 
779 	memset(&hca_param, 0, sizeof(hca_param));
780 	err = mlx4_QUERY_HCA(dev, &hca_param);
781 	if (err) {
782 		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
783 		return err;
784 	}
785 
	/* Fail if the HCA has an unknown global capability;
	 * at this time global_caps should always be zeroed.
	 */
789 	if (hca_param.global_caps) {
790 		mlx4_err(dev, "Unknown hca global capabilities\n");
791 		return -ENOSYS;
792 	}
793 
794 	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
795 
796 	dev->caps.hca_core_clock = hca_param.hca_core_clock;
797 
798 	memset(&dev_cap, 0, sizeof(dev_cap));
799 	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
800 	err = mlx4_dev_cap(dev, &dev_cap);
801 	if (err) {
802 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
803 		return err;
804 	}
805 
806 	err = mlx4_QUERY_FW(dev);
807 	if (err)
808 		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
809 
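	/* page_size_cap has the form ~(min_page_sz - 1), so ~page_size_cap + 1
	 * recovers the minimum page size the HCA supports.
	 */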
810 	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
812 	if (page_size > PAGE_SIZE) {
813 		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
814 			 page_size, PAGE_SIZE);
815 		return -ENODEV;
816 	}
817 
818 	/* slave gets uar page size from QUERY_HCA fw command */
819 	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
820 
821 	/* TODO: relax this assumption */
822 	if (dev->caps.uar_page_size != PAGE_SIZE) {
823 		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
824 			 dev->caps.uar_page_size, PAGE_SIZE);
825 		return -ENODEV;
826 	}
827 
828 	memset(&func_cap, 0, sizeof(func_cap));
829 	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
830 	if (err) {
831 		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
832 			 err);
833 		return err;
834 	}
835 
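	/* Reject behaviour bits that this driver does not recognize */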
836 	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
837 	    PF_CONTEXT_BEHAVIOUR_MASK) {
838 		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
839 			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
840 		return -ENOSYS;
841 	}
842 
843 	dev->caps.num_ports		= func_cap.num_ports;
844 	dev->quotas.qp			= func_cap.qp_quota;
845 	dev->quotas.srq			= func_cap.srq_quota;
846 	dev->quotas.cq			= func_cap.cq_quota;
847 	dev->quotas.mpt			= func_cap.mpt_quota;
848 	dev->quotas.mtt			= func_cap.mtt_quota;
849 	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
850 	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
851 	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
852 	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
853 	dev->caps.num_eqs		= func_cap.max_eq;
854 	dev->caps.reserved_eqs		= func_cap.reserved_eq;
855 	dev->caps.reserved_lkey		= func_cap.reserved_lkey;
856 	dev->caps.num_pds               = MLX4_NUM_PDS;
857 	dev->caps.num_mgms              = 0;
858 	dev->caps.num_amgms             = 0;
859 
860 	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
861 		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
862 			 dev->caps.num_ports, MLX4_MAX_PORTS);
863 		return -ENODEV;
864 	}
865 
866 	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
871 
872 	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
873 	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
874 	    !dev->caps.qp0_qkey) {
875 		err = -ENOMEM;
876 		goto err_mem;
877 	}
878 
879 	for (i = 1; i <= dev->caps.num_ports; ++i) {
880 		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
881 		if (err) {
882 			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
883 				 i, err);
884 			goto err_mem;
885 		}
886 		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
887 		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
888 		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
889 		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
890 		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
891 		dev->caps.port_mask[i] = dev->caps.port_type[i];
892 		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &dev->caps.gid_table_len[i],
						      &dev->caps.pkey_table_len[i]);
		if (err)
			goto err_mem;
897 	}
898 
899 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
900 				       dev->caps.reserved_uars) >
901 				       pci_resource_len(dev->persist->pdev,
902 							2)) {
903 		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
904 			 dev->caps.uar_page_size * dev->caps.num_uars,
905 			 (unsigned long long)
906 			 pci_resource_len(dev->persist->pdev, 2));
		err = -ENODEV;
		goto err_mem;
908 	}
909 
910 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
911 		dev->caps.eqe_size   = 64;
912 		dev->caps.eqe_factor = 1;
913 	} else {
914 		dev->caps.eqe_size   = 32;
915 		dev->caps.eqe_factor = 0;
916 	}
917 
918 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
919 		dev->caps.cqe_size   = 64;
920 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
921 	} else {
922 		dev->caps.cqe_size   = 32;
923 	}
924 
925 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
926 		dev->caps.eqe_size = hca_param.eqe_size;
927 		dev->caps.eqe_factor = 0;
928 	}
929 
930 	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
931 		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
933 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
934 	}
935 
936 	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
937 	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
938 
939 	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
940 	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
941 		 hca_param.rss_ip_frags ? "on" : "off");
942 
943 	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
944 	    dev->caps.bf_reg_size)
945 		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
946 
947 	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
948 		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
949 
950 	return 0;
951 
952 err_mem:
953 	kfree(dev->caps.qp0_qkey);
954 	kfree(dev->caps.qp0_tunnel);
955 	kfree(dev->caps.qp0_proxy);
956 	kfree(dev->caps.qp1_tunnel);
957 	kfree(dev->caps.qp1_proxy);
958 	dev->caps.qp0_qkey = NULL;
959 	dev->caps.qp0_tunnel = NULL;
960 	dev->caps.qp0_proxy = NULL;
961 	dev->caps.qp1_tunnel = NULL;
962 	dev->caps.qp1_proxy = NULL;
963 
964 	return err;
965 }
966 
967 static void mlx4_request_modules(struct mlx4_dev *dev)
968 {
969 	int port;
970 	int has_ib_port = false;
971 	int has_eth_port = false;
972 #define EN_DRV_NAME	"mlx4_en"
973 #define IB_DRV_NAME	"mlx4_ib"
974 
975 	for (port = 1; port <= dev->caps.num_ports; port++) {
976 		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
977 			has_ib_port = true;
978 		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
979 			has_eth_port = true;
980 	}
981 
982 	if (has_eth_port)
983 		request_module_nowait(EN_DRV_NAME);
984 	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
985 		request_module_nowait(IB_DRV_NAME);
986 }
987 
988 /*
989  * Change the port configuration of the device.
990  * Every user of this function must hold the port mutex.
991  */
992 int mlx4_change_port_types(struct mlx4_dev *dev,
993 			   enum mlx4_port_type *port_types)
994 {
995 	int err = 0;
996 	int change = 0;
997 	int port;
998 
999 	for (port = 0; port <  dev->caps.num_ports; port++) {
1000 		/* Change the port type only if the new type is different
1001 		 * from the current, and not set to Auto */
1002 		if (port_types[port] != dev->caps.port_type[port + 1])
1003 			change = 1;
1004 	}
1005 	if (change) {
1006 		mlx4_unregister_device(dev);
1007 		for (port = 1; port <= dev->caps.num_ports; port++) {
1008 			mlx4_CLOSE_PORT(dev, port);
1009 			dev->caps.port_type[port] = port_types[port - 1];
1010 			err = mlx4_SET_PORT(dev, port, -1);
1011 			if (err) {
1012 				mlx4_err(dev, "Failed to set port %d, aborting\n",
1013 					 port);
1014 				goto out;
1015 			}
1016 		}
1017 		mlx4_set_port_mask(dev);
1018 		err = mlx4_register_device(dev);
1019 		if (err) {
1020 			mlx4_err(dev, "Failed to register device\n");
1021 			goto out;
1022 		}
1023 		mlx4_request_modules(dev);
1024 	}
1025 
1026 out:
1027 	return err;
1028 }
1029 
1030 static ssize_t show_port_type(struct device *dev,
1031 			      struct device_attribute *attr,
1032 			      char *buf)
1033 {
1034 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1035 						   port_attr);
1036 	struct mlx4_dev *mdev = info->dev;
1037 	char type[8];
1038 
1039 	sprintf(type, "%s",
1040 		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
1041 		"ib" : "eth");
1042 	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
1043 		sprintf(buf, "auto (%s)\n", type);
1044 	else
1045 		sprintf(buf, "%s\n", type);
1046 
1047 	return strlen(buf);
1048 }
1049 
1050 static ssize_t set_port_type(struct device *dev,
1051 			     struct device_attribute *attr,
1052 			     const char *buf, size_t count)
1053 {
1054 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1055 						   port_attr);
1056 	struct mlx4_dev *mdev = info->dev;
1057 	struct mlx4_priv *priv = mlx4_priv(mdev);
1058 	enum mlx4_port_type types[MLX4_MAX_PORTS];
1059 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
1060 	static DEFINE_MUTEX(set_port_type_mutex);
1061 	int i;
1062 	int err = 0;
1063 
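	/* One static mutex serializes port type changes across all devices */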
1064 	mutex_lock(&set_port_type_mutex);
1065 
1066 	if (!strcmp(buf, "ib\n"))
1067 		info->tmp_type = MLX4_PORT_TYPE_IB;
1068 	else if (!strcmp(buf, "eth\n"))
1069 		info->tmp_type = MLX4_PORT_TYPE_ETH;
1070 	else if (!strcmp(buf, "auto\n"))
1071 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
1072 	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
1074 		err = -EINVAL;
1075 		goto err_out;
1076 	}
1077 
1078 	mlx4_stop_sense(mdev);
1079 	mutex_lock(&priv->port_mutex);
1080 	/* Possible type is always the one that was delivered */
1081 	mdev->caps.possible_type[info->port] = info->tmp_type;
1082 
1083 	for (i = 0; i < mdev->caps.num_ports; i++) {
1084 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
1085 					mdev->caps.possible_type[i+1];
1086 		if (types[i] == MLX4_PORT_TYPE_AUTO)
1087 			types[i] = mdev->caps.port_type[i+1];
1088 	}
1089 
1090 	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
1091 	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
1092 		for (i = 1; i <= mdev->caps.num_ports; i++) {
1093 			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
1094 				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
1095 				err = -EINVAL;
1096 			}
1097 		}
1098 	}
1099 	if (err) {
1100 		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
1101 		goto out;
1102 	}
1103 
1104 	mlx4_do_sense_ports(mdev, new_types, types);
1105 
1106 	err = mlx4_check_port_params(mdev, new_types);
1107 	if (err)
1108 		goto out;
1109 
1110 	/* We are about to apply the changes after the configuration
1111 	 * was verified, no need to remember the temporary types
1112 	 * any more */
1113 	for (i = 0; i < mdev->caps.num_ports; i++)
1114 		priv->port[i + 1].tmp_type = 0;
1115 
1116 	err = mlx4_change_port_types(mdev, new_types);
1117 
1118 out:
1119 	mlx4_start_sense(mdev);
1120 	mutex_unlock(&priv->port_mutex);
1121 err_out:
1122 	mutex_unlock(&set_port_type_mutex);
1123 
1124 	return err ? err : count;
1125 }
1126 
1127 enum ibta_mtu {
1128 	IB_MTU_256  = 1,
1129 	IB_MTU_512  = 2,
1130 	IB_MTU_1024 = 3,
1131 	IB_MTU_2048 = 4,
1132 	IB_MTU_4096 = 5
1133 };
1134 
1135 static inline int int_to_ibta_mtu(int mtu)
1136 {
1137 	switch (mtu) {
1138 	case 256:  return IB_MTU_256;
1139 	case 512:  return IB_MTU_512;
1140 	case 1024: return IB_MTU_1024;
1141 	case 2048: return IB_MTU_2048;
1142 	case 4096: return IB_MTU_4096;
1143 	default: return -1;
1144 	}
1145 }
1146 
1147 static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
1148 {
1149 	switch (mtu) {
1150 	case IB_MTU_256:  return  256;
1151 	case IB_MTU_512:  return  512;
1152 	case IB_MTU_1024: return 1024;
1153 	case IB_MTU_2048: return 2048;
1154 	case IB_MTU_4096: return 4096;
1155 	default: return -1;
1156 	}
1157 }
1158 
1159 static ssize_t show_port_ib_mtu(struct device *dev,
1160 			     struct device_attribute *attr,
1161 			     char *buf)
1162 {
1163 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1164 						   port_mtu_attr);
1165 	struct mlx4_dev *mdev = info->dev;
1166 
1167 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
1168 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1169 
1170 	sprintf(buf, "%d\n",
1171 			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
1172 	return strlen(buf);
1173 }
1174 
1175 static ssize_t set_port_ib_mtu(struct device *dev,
1176 			     struct device_attribute *attr,
1177 			     const char *buf, size_t count)
1178 {
1179 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1180 						   port_mtu_attr);
1181 	struct mlx4_dev *mdev = info->dev;
1182 	struct mlx4_priv *priv = mlx4_priv(mdev);
1183 	int err, port, mtu, ibta_mtu = -1;
1184 
1185 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
1186 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1187 		return -EINVAL;
1188 	}
1189 
1190 	err = kstrtoint(buf, 0, &mtu);
1191 	if (!err)
1192 		ibta_mtu = int_to_ibta_mtu(mtu);
1193 
1194 	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA MTU\n", buf);
1196 		return -EINVAL;
1197 	}
1198 
1199 	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
1200 
1201 	mlx4_stop_sense(mdev);
1202 	mutex_lock(&priv->port_mutex);
1203 	mlx4_unregister_device(mdev);
1204 	for (port = 1; port <= mdev->caps.num_ports; port++) {
1205 		mlx4_CLOSE_PORT(mdev, port);
1206 		err = mlx4_SET_PORT(mdev, port, -1);
1207 		if (err) {
1208 			mlx4_err(mdev, "Failed to set port %d, aborting\n",
1209 				 port);
1210 			goto err_set_port;
1211 		}
1212 	}
1213 	err = mlx4_register_device(mdev);
1214 err_set_port:
1215 	mutex_unlock(&priv->port_mutex);
1216 	mlx4_start_sense(mdev);
1217 	return err ? err : count;
1218 }
1219 
1220 int mlx4_bond(struct mlx4_dev *dev)
1221 {
1222 	int ret = 0;
1223 	struct mlx4_priv *priv = mlx4_priv(dev);
1224 
1225 	mutex_lock(&priv->bond_mutex);
1226 
1227 	if (!mlx4_is_bonded(dev))
1228 		ret = mlx4_do_bond(dev, true);
1229 	else
1230 		ret = 0;
1231 
1232 	mutex_unlock(&priv->bond_mutex);
1233 	if (ret)
1234 		mlx4_err(dev, "Failed to bond device: %d\n", ret);
1235 	else
1236 		mlx4_dbg(dev, "Device is bonded\n");
1237 	return ret;
1238 }
1239 EXPORT_SYMBOL_GPL(mlx4_bond);
1240 
1241 int mlx4_unbond(struct mlx4_dev *dev)
1242 {
1243 	int ret = 0;
1244 	struct mlx4_priv *priv = mlx4_priv(dev);
1245 
1246 	mutex_lock(&priv->bond_mutex);
1247 
1248 	if (mlx4_is_bonded(dev))
1249 		ret = mlx4_do_bond(dev, false);
1250 
1251 	mutex_unlock(&priv->bond_mutex);
1252 	if (ret)
1253 		mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1254 	else
1255 		mlx4_dbg(dev, "Device is unbonded\n");
1256 	return ret;
1257 }
1258 EXPORT_SYMBOL_GPL(mlx4_unbond);
1261 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1262 {
1263 	u8 port1 = v2p->port1;
1264 	u8 port2 = v2p->port2;
1265 	struct mlx4_priv *priv = mlx4_priv(dev);
1266 	int err;
1267 
1268 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1269 		return -ENOTSUPP;
1270 
1271 	mutex_lock(&priv->bond_mutex);
1272 
1273 	/* zero means keep current mapping for this port */
1274 	if (port1 == 0)
1275 		port1 = priv->v2p.port1;
1276 	if (port2 == 0)
1277 		port2 = priv->v2p.port2;
1278 
1279 	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1280 	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1281 	    (port1 == 2 && port2 == 1)) {
		/* Besides the boundary checks, cross mapping makes
		 * no sense and is therefore not allowed */
1284 		err = -EINVAL;
1285 	} else if ((port1 == priv->v2p.port1) &&
1286 		 (port2 == priv->v2p.port2)) {
1287 		err = 0;
1288 	} else {
1289 		err = mlx4_virt2phy_port_map(dev, port1, port2);
1290 		if (!err) {
1291 			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1292 				 port1, port2);
1293 			priv->v2p.port1 = port1;
1294 			priv->v2p.port2 = port2;
1295 		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
1297 		}
1298 	}
1299 
1300 	mutex_unlock(&priv->bond_mutex);
1301 	return err;
1302 }
1303 EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1304 
1305 static int mlx4_load_fw(struct mlx4_dev *dev)
1306 {
1307 	struct mlx4_priv *priv = mlx4_priv(dev);
1308 	int err;
1309 
1310 	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1311 					 GFP_HIGHUSER | __GFP_NOWARN, 0);
1312 	if (!priv->fw.fw_icm) {
1313 		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1314 		return -ENOMEM;
1315 	}
1316 
1317 	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1318 	if (err) {
1319 		mlx4_err(dev, "MAP_FA command failed, aborting\n");
1320 		goto err_free;
1321 	}
1322 
1323 	err = mlx4_RUN_FW(dev);
1324 	if (err) {
1325 		mlx4_err(dev, "RUN_FW command failed, aborting\n");
1326 		goto err_unmap_fa;
1327 	}
1328 
1329 	return 0;
1330 
1331 err_unmap_fa:
1332 	mlx4_UNMAP_FA(dev);
1333 
1334 err_free:
1335 	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1336 	return err;
1337 }
1338 
1339 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1340 				int cmpt_entry_sz)
1341 {
1342 	struct mlx4_priv *priv = mlx4_priv(dev);
1343 	int err;
1344 	int num_eqs;
1345 
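	/* The cMPT area is carved into per-type regions (QP, SRQ, CQ, EQ),
	 * each starting at its MLX4_CMPT_TYPE_* slot within cmpt_base.
	 */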
1346 	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1347 				  cmpt_base +
1348 				  ((u64) (MLX4_CMPT_TYPE_QP *
1349 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1350 				  cmpt_entry_sz, dev->caps.num_qps,
1351 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1352 				  0, 0);
1353 	if (err)
1354 		goto err;
1355 
1356 	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1357 				  cmpt_base +
1358 				  ((u64) (MLX4_CMPT_TYPE_SRQ *
1359 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1360 				  cmpt_entry_sz, dev->caps.num_srqs,
1361 				  dev->caps.reserved_srqs, 0, 0);
1362 	if (err)
1363 		goto err_qp;
1364 
1365 	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1366 				  cmpt_base +
1367 				  ((u64) (MLX4_CMPT_TYPE_CQ *
1368 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1369 				  cmpt_entry_sz, dev->caps.num_cqs,
1370 				  dev->caps.reserved_cqs, 0, 0);
1371 	if (err)
1372 		goto err_srq;
1373 
1374 	num_eqs = dev->phys_caps.num_phys_eqs;
1375 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1376 				  cmpt_base +
1377 				  ((u64) (MLX4_CMPT_TYPE_EQ *
1378 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1379 				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
1380 	if (err)
1381 		goto err_cq;
1382 
1383 	return 0;
1384 
1385 err_cq:
1386 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1387 
1388 err_srq:
1389 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1390 
1391 err_qp:
1392 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1393 
1394 err:
1395 	return err;
1396 }
1397 
1398 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1399 			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
1400 {
1401 	struct mlx4_priv *priv = mlx4_priv(dev);
1402 	u64 aux_pages;
1403 	int num_eqs;
1404 	int err;
1405 
1406 	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1407 	if (err) {
1408 		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1409 		return err;
1410 	}
1411 
1412 	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1413 		 (unsigned long long) icm_size >> 10,
1414 		 (unsigned long long) aux_pages << 2);
1415 
1416 	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1417 					  GFP_HIGHUSER | __GFP_NOWARN, 0);
1418 	if (!priv->fw.aux_icm) {
1419 		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1420 		return -ENOMEM;
1421 	}
1422 
1423 	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1424 	if (err) {
1425 		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1426 		goto err_free_aux;
1427 	}
1428 
1429 	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1430 	if (err) {
1431 		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1432 		goto err_unmap_aux;
1433 	}
1436 	num_eqs = dev->phys_caps.num_phys_eqs;
1437 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1438 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
1439 				  num_eqs, num_eqs, 0, 0);
1440 	if (err) {
1441 		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1442 		goto err_unmap_cmpt;
1443 	}
1444 
1445 	/*
1446 	 * Reserved MTT entries must be aligned up to a cacheline
1447 	 * boundary, since the FW will write to them, while the driver
1448 	 * writes to all other MTT entries. (The variable
1449 	 * dev->caps.mtt_entry_sz below is really the MTT segment
1450 	 * size, not the raw entry size)
1451 	 */
1452 	dev->caps.reserved_mtts =
1453 		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1454 		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1455 
1456 	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1457 				  init_hca->mtt_base,
1458 				  dev->caps.mtt_entry_sz,
1459 				  dev->caps.num_mtts,
1460 				  dev->caps.reserved_mtts, 1, 0);
1461 	if (err) {
1462 		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1463 		goto err_unmap_eq;
1464 	}
1465 
1466 	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1467 				  init_hca->dmpt_base,
1468 				  dev_cap->dmpt_entry_sz,
1469 				  dev->caps.num_mpts,
1470 				  dev->caps.reserved_mrws, 1, 1);
1471 	if (err) {
1472 		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1473 		goto err_unmap_mtt;
1474 	}
1475 
1476 	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1477 				  init_hca->qpc_base,
1478 				  dev_cap->qpc_entry_sz,
1479 				  dev->caps.num_qps,
1480 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1481 				  0, 0);
1482 	if (err) {
1483 		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1484 		goto err_unmap_dmpt;
1485 	}
1486 
1487 	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1488 				  init_hca->auxc_base,
1489 				  dev_cap->aux_entry_sz,
1490 				  dev->caps.num_qps,
1491 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1492 				  0, 0);
1493 	if (err) {
1494 		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1495 		goto err_unmap_qp;
1496 	}
1497 
1498 	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1499 				  init_hca->altc_base,
1500 				  dev_cap->altc_entry_sz,
1501 				  dev->caps.num_qps,
1502 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1503 				  0, 0);
1504 	if (err) {
1505 		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1506 		goto err_unmap_auxc;
1507 	}
1508 
1509 	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1510 				  init_hca->rdmarc_base,
1511 				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1512 				  dev->caps.num_qps,
1513 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1514 				  0, 0);
1515 	if (err) {
1516 		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1517 		goto err_unmap_altc;
1518 	}
1519 
1520 	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1521 				  init_hca->cqc_base,
1522 				  dev_cap->cqc_entry_sz,
1523 				  dev->caps.num_cqs,
1524 				  dev->caps.reserved_cqs, 0, 0);
1525 	if (err) {
1526 		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1527 		goto err_unmap_rdmarc;
1528 	}
1529 
1530 	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1531 				  init_hca->srqc_base,
1532 				  dev_cap->srq_entry_sz,
1533 				  dev->caps.num_srqs,
1534 				  dev->caps.reserved_srqs, 0, 0);
1535 	if (err) {
1536 		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1537 		goto err_unmap_cq;
1538 	}
1539 
1540 	/*
1541 	 * For flow steering device managed mode it is required to use
1542 	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1543 	 * required, but for simplicity just map the whole multicast
1544 	 * group table now.  The table isn't very big and it's a lot
1545 	 * easier than trying to track ref counts.
1546 	 */
1547 	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1548 				  init_hca->mc_base,
1549 				  mlx4_get_mgm_entry_size(dev),
1550 				  dev->caps.num_mgms + dev->caps.num_amgms,
1551 				  dev->caps.num_mgms + dev->caps.num_amgms,
1552 				  0, 0);
1553 	if (err) {
1554 		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1555 		goto err_unmap_srq;
1556 	}
1557 
1558 	return 0;
1559 
1560 err_unmap_srq:
1561 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1562 
1563 err_unmap_cq:
1564 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1565 
1566 err_unmap_rdmarc:
1567 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1568 
1569 err_unmap_altc:
1570 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1571 
1572 err_unmap_auxc:
1573 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1574 
1575 err_unmap_qp:
1576 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1577 
1578 err_unmap_dmpt:
1579 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1580 
1581 err_unmap_mtt:
1582 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1583 
1584 err_unmap_eq:
1585 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1586 
1587 err_unmap_cmpt:
1588 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1589 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1590 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1591 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1592 
1593 err_unmap_aux:
1594 	mlx4_UNMAP_ICM_AUX(dev);
1595 
1596 err_free_aux:
1597 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1598 
1599 	return err;
1600 }
1601 
1602 static void mlx4_free_icms(struct mlx4_dev *dev)
1603 {
1604 	struct mlx4_priv *priv = mlx4_priv(dev);
1605 
1606 	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1607 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1608 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1609 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1610 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1611 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1612 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1613 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1614 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1615 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1616 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1617 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1618 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1619 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1620 
1621 	mlx4_UNMAP_ICM_AUX(dev);
1622 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1623 }
1624 
1625 static void mlx4_slave_exit(struct mlx4_dev *dev)
1626 {
1627 	struct mlx4_priv *priv = mlx4_priv(dev);
1628 
1629 	mutex_lock(&priv->cmd.slave_cmd_mutex);
1630 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1631 			  MLX4_COMM_TIME))
1632 		mlx4_warn(dev, "Failed to close slave function\n");
1633 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
1634 }
1635 
1636 static int map_bf_area(struct mlx4_dev *dev)
1637 {
1638 	struct mlx4_priv *priv = mlx4_priv(dev);
1639 	resource_size_t bf_start;
1640 	resource_size_t bf_len;
1641 	int err = 0;
1642 
1643 	if (!dev->caps.bf_reg_size)
1644 		return -ENXIO;
1645 
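	/* The BlueFlame registers follow the UAR pages within BAR 2 */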
1646 	bf_start = pci_resource_start(dev->persist->pdev, 2) +
1647 			(dev->caps.num_uars << PAGE_SHIFT);
1648 	bf_len = pci_resource_len(dev->persist->pdev, 2) -
1649 			(dev->caps.num_uars << PAGE_SHIFT);
1650 	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1651 	if (!priv->bf_mapping)
1652 		err = -ENOMEM;
1653 
1654 	return err;
1655 }
1656 
1657 static void unmap_bf_area(struct mlx4_dev *dev)
1658 {
1659 	if (mlx4_priv(dev)->bf_mapping)
1660 		io_mapping_free(mlx4_priv(dev)->bf_mapping);
1661 }
1662 
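/* Read the HCA's free-running 64-bit clock. The counter is exposed as two
 * big-endian 32-bit words; read high/low/high and retry (up to 10 times)
 * until the high word is stable so that the two halves are consistent.
 */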
1663 cycle_t mlx4_read_clock(struct mlx4_dev *dev)
1664 {
1665 	u32 clockhi, clocklo, clockhi1;
1666 	cycle_t cycles;
1667 	int i;
1668 	struct mlx4_priv *priv = mlx4_priv(dev);
1669 
1670 	for (i = 0; i < 10; i++) {
1671 		clockhi = swab32(readl(priv->clock_mapping));
1672 		clocklo = swab32(readl(priv->clock_mapping + 4));
1673 		clockhi1 = swab32(readl(priv->clock_mapping));
1674 		if (clockhi == clockhi1)
1675 			break;
1676 	}
1677 
1678 	cycles = (u64) clockhi << 32 | (u64) clocklo;
1679 
1680 	return cycles;
1681 }
1682 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1683 
1684 
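/* Map the internal clock counter register, at the BAR and offset that the
 * firmware reported (priv->fw.clock_bar/clock_offset).
 */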
1685 static int map_internal_clock(struct mlx4_dev *dev)
1686 {
1687 	struct mlx4_priv *priv = mlx4_priv(dev);
1688 
1689 	priv->clock_mapping =
1690 		ioremap(pci_resource_start(dev->persist->pdev,
1691 					   priv->fw.clock_bar) +
1692 			priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1693 
1694 	if (!priv->clock_mapping)
1695 		return -ENOMEM;
1696 
1697 	return 0;
1698 }
1699 
1700 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1701 				   struct mlx4_clock_params *params)
1702 {
1703 	struct mlx4_priv *priv = mlx4_priv(dev);
1704 
1705 	if (mlx4_is_slave(dev))
1706 		return -ENOTSUPP;
1707 
1708 	if (!params)
1709 		return -EINVAL;
1710 
1711 	params->bar = priv->fw.clock_bar;
1712 	params->offset = priv->fw.clock_offset;
1713 	params->size = MLX4_CLOCK_SIZE;
1714 
1715 	return 0;
1716 }
1717 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1718 
1719 static void unmap_internal_clock(struct mlx4_dev *dev)
1720 {
1721 	struct mlx4_priv *priv = mlx4_priv(dev);
1722 
1723 	if (priv->clock_mapping)
1724 		iounmap(priv->clock_mapping);
1725 }
1726 
1727 static void mlx4_close_hca(struct mlx4_dev *dev)
1728 {
1729 	unmap_internal_clock(dev);
1730 	unmap_bf_area(dev);
1731 	if (mlx4_is_slave(dev))
1732 		mlx4_slave_exit(dev);
1733 	else {
1734 		mlx4_CLOSE_HCA(dev, 0);
1735 		mlx4_free_icms(dev);
1736 	}
1737 }
1738 
1739 static void mlx4_close_fw(struct mlx4_dev *dev)
1740 {
1741 	if (!mlx4_is_slave(dev)) {
1742 		mlx4_UNMAP_FA(dev);
1743 		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1744 	}
1745 }
1746 
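/* Poll the comm channel flags until the PF clears the offline bit, or give
 * up after MLX4_COMM_OFFLINE_TIME_OUT msecs.
 */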
1747 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1748 {
1749 #define COMM_CHAN_OFFLINE_OFFSET 0x09
1750 
1751 	u32 comm_flags;
1752 	u32 offline_bit;
1753 	unsigned long end;
1754 	struct mlx4_priv *priv = mlx4_priv(dev);
1755 
1756 	end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
1757 	while (time_before(jiffies, end)) {
		comm_flags = swab32(readl((char __iomem *)priv->mfunc.comm +
					  MLX4_COMM_CHAN_FLAGS));
1760 		offline_bit = (comm_flags &
1761 			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1762 		if (!offline_bit)
1763 			return 0;
		/* In some cases, e.g. as part of the AER/reset flow, the PF
		 * needs around 100 msec to load. We therefore sleep for
		 * 100 msec to allow other tasks to make use of this CPU in
		 * the meantime.
		 */
1769 		msleep(100);
1770 	}
1771 	mlx4_err(dev, "Communication channel is offline.\n");
1772 	return -EIO;
1773 }
1774 
1775 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
1776 {
1777 #define COMM_CHAN_RST_OFFSET 0x1e
1778 
1779 	struct mlx4_priv *priv = mlx4_priv(dev);
1780 	u32 comm_rst;
1781 	u32 comm_caps;
1782 
	comm_caps = swab32(readl((char __iomem *)priv->mfunc.comm +
				 MLX4_COMM_CHAN_CAPS));
1785 	comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
1786 
1787 	if (comm_rst)
1788 		dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
1789 }
1790 
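/* VF (slave) initialization: reset the comm channel, verify that the
 * command interface revision matches the master's, then hand the master
 * the VHCR DMA address in 16-bit chunks (VHCR0/1/2 followed by VHCR_EN).
 */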
1791 static int mlx4_init_slave(struct mlx4_dev *dev)
1792 {
1793 	struct mlx4_priv *priv = mlx4_priv(dev);
1794 	u64 dma = (u64) priv->mfunc.vhcr_dma;
1795 	int ret_from_reset = 0;
1796 	u32 slave_read;
1797 	u32 cmd_channel_ver;
1798 
1799 	if (atomic_read(&pf_loading)) {
1800 		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
1801 		return -EPROBE_DEFER;
1802 	}
1803 
1804 	mutex_lock(&priv->cmd.slave_cmd_mutex);
1805 	priv->cmd.max_cmds = 1;
1806 	if (mlx4_comm_check_offline(dev)) {
1807 		mlx4_err(dev, "PF is not responsive, skipping initialization\n");
1808 		goto err_offline;
1809 	}
1810 
1811 	mlx4_reset_vf_support(dev);
1812 	mlx4_warn(dev, "Sending reset\n");
1813 	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
1814 				       MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
	/* If we are in the middle of FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before giving up.
	 */
1817 	if (ret_from_reset) {
		if (ret_from_reset == MLX4_DELAY_RESET_SLAVE) {
1819 			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
1820 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
1821 			return -EPROBE_DEFER;
1822 		} else
1823 			goto err;
1824 	}
1825 
1826 	/* check the driver version - the slave I/F revision
1827 	 * must match the master's */
1828 	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
1829 	cmd_channel_ver = mlx4_comm_get_version();
1830 
1831 	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1832 		MLX4_COMM_GET_IF_REV(slave_read)) {
1833 		mlx4_err(dev, "slave driver version is not supported by the master\n");
1834 		goto err;
1835 	}
1836 
1837 	mlx4_warn(dev, "Sending vhcr0\n");
1838 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
1839 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1840 		goto err;
1841 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
1842 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1843 		goto err;
1844 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
1845 			     MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1846 		goto err;
1847 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
1848 			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1849 		goto err;
1850 
1851 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
1852 	return 0;
1853 
1854 err:
1855 	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
1856 err_offline:
1857 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
1858 	return -EIO;
1859 }
1860 
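/* Trim the master PF's per-port views: Ethernet ports get the per-slave
 * GID allotment, IB ports a single GID, and one P_Key entry is held back
 * from the physical table.
 */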
1861 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1862 {
1863 	int i;
1864 
1865 	for (i = 1; i <= dev->caps.num_ports; i++) {
1866 		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1867 			dev->caps.gid_table_len[i] =
1868 				mlx4_get_slave_num_gids(dev, 0, i);
1869 		else
1870 			dev->caps.gid_table_len[i] = 1;
1871 		dev->caps.pkey_table_len[i] =
1872 			dev->phys_caps.pkey_phys_table_len[i] - 1;
1873 	}
1874 }
1875 
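/* Find the smallest log2 MGM entry size that can hold qp_per_entry QPs.
 * The capacity formula below reads as: an entry of 2^i bytes is made up of
 * (2^i / 16) 16-byte lines, two of which are header, with four QPNs per
 * remaining line.
 */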
1876 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
1877 {
	int i;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
	     i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; i++) {
1882 		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
1883 			break;
1884 	}
1885 
1886 	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
1887 }
1888 
1889 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
1890 {
1891 	switch (dmfs_high_steer_mode) {
1892 	case MLX4_STEERING_DMFS_A0_DEFAULT:
1893 		return "default performance";
1894 
1895 	case MLX4_STEERING_DMFS_A0_DYNAMIC:
1896 		return "dynamic hybrid mode";
1897 
1898 	case MLX4_STEERING_DMFS_A0_STATIC:
1899 		return "performance optimized for limited rule configuration (static)";
1900 
1901 	case MLX4_STEERING_DMFS_A0_DISABLE:
1902 		return "disabled performance optimized steering";
1903 
1904 	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
1905 		return "performance optimized steering not supported";
1906 
1907 	default:
1908 		return "Unrecognized mode";
1909 	}
1910 }
1911 
1912 #define MLX4_DMFS_A0_STEERING			(1UL << 2)
1913 
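/* Select the steering mode: device-managed flow steering (DMFS) when the
 * modparam allows it and the FW supports enough QPs per rule for every
 * function; otherwise B0 (which needs both the VEP UC and MC steering
 * flags) or, failing that, A0.
 */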
1914 static void choose_steering_mode(struct mlx4_dev *dev,
1915 				 struct mlx4_dev_cap *dev_cap)
1916 {
1917 	if (mlx4_log_num_mgm_entry_size <= 0) {
1918 		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
1919 			if (dev->caps.dmfs_high_steer_mode ==
1920 			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1921 				mlx4_err(dev, "DMFS high rate mode not supported\n");
1922 			else
1923 				dev->caps.dmfs_high_steer_mode =
1924 					MLX4_STEERING_DMFS_A0_STATIC;
1925 		}
1926 	}
1927 
1928 	if (mlx4_log_num_mgm_entry_size <= 0 &&
1929 	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1930 	    (!mlx4_is_mfunc(dev) ||
1931 	     (dev_cap->fs_max_num_qp_per_entry >=
1932 	     (dev->persist->num_vfs + 1))) &&
1933 	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1934 		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1935 		dev->oper_log_mgm_entry_size =
1936 			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
1937 		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1938 		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
1939 		dev->caps.fs_log_max_ucast_qp_range_size =
1940 			dev_cap->fs_log_max_ucast_qp_range_size;
1941 	} else {
1942 		if (dev->caps.dmfs_high_steer_mode !=
1943 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1944 			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
1945 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
1946 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1947 			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
1948 		else {
1949 			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
1950 
1951 			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1952 			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1953 				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
1954 		}
1955 		dev->oper_log_mgm_entry_size =
1956 			mlx4_log_num_mgm_entry_size > 0 ?
1957 			mlx4_log_num_mgm_entry_size :
1958 			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1959 		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1960 	}
1961 	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
1962 		 mlx4_steering_mode_str(dev->caps.steering_mode),
1963 		 dev->oper_log_mgm_entry_size,
1964 		 mlx4_log_num_mgm_entry_size);
1965 }
1966 
1967 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
1968 				       struct mlx4_dev_cap *dev_cap)
1969 {
1970 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
1971 	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
1972 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
1973 	else
1974 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
1975 
1976 	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",  (dev->caps.tunnel_offload_mode
1977 		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
1978 }
1979 
1980 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
1981 {
1982 	int i;
1983 	struct mlx4_port_cap port_cap;
1984 
1985 	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1986 		return -EINVAL;
1987 
1988 	for (i = 1; i <= dev->caps.num_ports; i++) {
1989 		if (mlx4_dev_port(dev, i, &port_cap)) {
1990 			mlx4_err(dev,
1991 				 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
1992 		} else if ((dev->caps.dmfs_high_steer_mode !=
1993 			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
1994 			   (port_cap.dmfs_optimized_state ==
1995 			    !!(dev->caps.dmfs_high_steer_mode ==
1996 			    MLX4_STEERING_DMFS_A0_DISABLE))) {
1997 			mlx4_err(dev,
1998 				 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
1999 				 dmfs_high_rate_steering_mode_str(
2000 					dev->caps.dmfs_high_steer_mode),
2001 				 (port_cap.dmfs_optimized_state ?
2002 					"enabled" : "disabled"));
2003 		}
2004 	}
2005 
2006 	return 0;
2007 }
2008 
2009 static int mlx4_init_fw(struct mlx4_dev *dev)
2010 {
2011 	struct mlx4_mod_stat_cfg   mlx4_cfg;
2012 	int err = 0;
2013 
2014 	if (!mlx4_is_slave(dev)) {
2015 		err = mlx4_QUERY_FW(dev);
2016 		if (err) {
2017 			if (err == -EACCES)
2018 				mlx4_info(dev, "non-primary physical function, skipping\n");
2019 			else
2020 				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2021 			return err;
2022 		}
2023 
2024 		err = mlx4_load_fw(dev);
2025 		if (err) {
2026 			mlx4_err(dev, "Failed to start FW, aborting\n");
2027 			return err;
2028 		}
2029 
2030 		mlx4_cfg.log_pg_sz_m = 1;
2031 		mlx4_cfg.log_pg_sz = 0;
2032 		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2033 		if (err)
2034 			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2035 	}
2036 
2037 	return err;
2038 }
2039 
2040 static int mlx4_init_hca(struct mlx4_dev *dev)
2041 {
2042 	struct mlx4_priv	  *priv = mlx4_priv(dev);
2043 	struct mlx4_adapter	   adapter;
2044 	struct mlx4_dev_cap	   dev_cap;
2045 	struct mlx4_profile	   profile;
2046 	struct mlx4_init_hca_param init_hca;
2047 	u64 icm_size;
2048 	struct mlx4_config_dev_params params;
2049 	int err;
2050 
2051 	if (!mlx4_is_slave(dev)) {
2052 		err = mlx4_dev_cap(dev, &dev_cap);
2053 		if (err) {
2054 			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2055 			return err;
2056 		}
2057 
2058 		choose_steering_mode(dev, &dev_cap);
2059 		choose_tunnel_offload_mode(dev, &dev_cap);
2060 
2061 		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2062 		    mlx4_is_master(dev))
2063 			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2064 
2065 		err = mlx4_get_phys_port_id(dev);
2066 		if (err)
2067 			mlx4_err(dev, "Fail to get physical port id\n");
2068 
2069 		if (mlx4_is_master(dev))
2070 			mlx4_parav_master_pf_caps(dev);
2071 
2072 		if (mlx4_low_memory_profile()) {
2073 			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2074 			profile = low_mem_profile;
2075 		} else {
2076 			profile = default_profile;
2077 		}
2078 		if (dev->caps.steering_mode ==
2079 		    MLX4_STEERING_MODE_DEVICE_MANAGED)
2080 			profile.num_mcg = MLX4_FS_NUM_MCG;
2081 
2082 		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
2083 					     &init_hca);
2084 		if ((long long) icm_size < 0) {
2085 			err = icm_size;
2086 			return err;
2087 		}
2088 
2089 		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2090 
2091 		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
2092 		init_hca.uar_page_sz = PAGE_SHIFT - 12;
2093 		init_hca.mw_enabled = 0;
2094 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2095 		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2096 			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2097 
2098 		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
2099 		if (err)
2100 			return err;
2101 
2102 		err = mlx4_INIT_HCA(dev, &init_hca);
2103 		if (err) {
2104 			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2105 			goto err_free_icm;
2106 		}
2107 
2108 		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2109 			err = mlx4_query_func(dev, &dev_cap);
2110 			if (err < 0) {
2111 				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2112 				goto err_close;
2113 			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2114 				dev->caps.num_eqs = dev_cap.max_eqs;
2115 				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
2116 				dev->caps.reserved_uars = dev_cap.reserved_uars;
2117 			}
2118 		}
2119 
2120 		/*
2121 		 * If TS is supported by FW
2122 		 * read HCA frequency by QUERY_HCA command
2123 		 */
2124 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2125 			memset(&init_hca, 0, sizeof(init_hca));
2126 			err = mlx4_QUERY_HCA(dev, &init_hca);
2127 			if (err) {
2128 				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
2129 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2130 			} else {
2131 				dev->caps.hca_core_clock =
2132 					init_hca.hca_core_clock;
2133 			}
2134 
			/* If we got an HCA frequency of 0, disable
			 * timestamping to avoid dividing by zero.
			 */
2138 			if (!dev->caps.hca_core_clock) {
2139 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2140 				mlx4_err(dev,
2141 					 "HCA frequency is 0 - timestamping is not supported\n");
2142 			} else if (map_internal_clock(dev)) {
2143 				/*
2144 				 * Map internal clock,
2145 				 * in case of failure disable timestamping
2146 				 */
2147 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2148 				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2149 			}
2150 		}
2151 
2152 		if (dev->caps.dmfs_high_steer_mode !=
2153 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2154 			if (mlx4_validate_optimized_steering(dev))
2155 				mlx4_warn(dev, "Optimized steering validation failed\n");
2156 
2157 			if (dev->caps.dmfs_high_steer_mode ==
2158 			    MLX4_STEERING_DMFS_A0_DISABLE) {
2159 				dev->caps.dmfs_high_rate_qpn_base =
2160 					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2161 				dev->caps.dmfs_high_rate_qpn_range =
2162 					MLX4_A0_STEERING_TABLE_SIZE;
2163 			}
2164 
2165 			mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
2166 				 dmfs_high_rate_steering_mode_str(
2167 					dev->caps.dmfs_high_steer_mode));
2168 		}
2169 	} else {
2170 		err = mlx4_init_slave(dev);
2171 		if (err) {
2172 			if (err != -EPROBE_DEFER)
2173 				mlx4_err(dev, "Failed to initialize slave\n");
2174 			return err;
2175 		}
2176 
2177 		err = mlx4_slave_cap(dev);
2178 		if (err) {
2179 			mlx4_err(dev, "Failed to obtain slave caps\n");
2180 			goto err_close;
2181 		}
2182 	}
2183 
2184 	if (map_bf_area(dev))
2185 		mlx4_dbg(dev, "Failed to map blue flame area\n");
2186 
	/* Only the master sets the ports; all the rest get them from it. */
2188 	if (!mlx4_is_slave(dev))
2189 		mlx4_set_port_mask(dev);
2190 
2191 	err = mlx4_QUERY_ADAPTER(dev, &adapter);
2192 	if (err) {
2193 		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2194 		goto unmap_bf;
2195 	}
2196 
2197 	/* Query CONFIG_DEV parameters */
2198 	err = mlx4_config_dev_retrieval(dev, &params);
2199 	if (err && err != -ENOTSUPP) {
2200 		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2201 	} else if (!err) {
2202 		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2203 		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2204 	}
2205 	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
2207 
2208 	return 0;
2209 
2210 unmap_bf:
2211 	unmap_internal_clock(dev);
2212 	unmap_bf_area(dev);
2213 
2214 	if (mlx4_is_slave(dev)) {
2215 		kfree(dev->caps.qp0_qkey);
2216 		kfree(dev->caps.qp0_tunnel);
2217 		kfree(dev->caps.qp0_proxy);
2218 		kfree(dev->caps.qp1_tunnel);
2219 		kfree(dev->caps.qp1_proxy);
2220 	}
2221 
2222 err_close:
2223 	if (mlx4_is_slave(dev))
2224 		mlx4_slave_exit(dev);
2225 	else
2226 		mlx4_CLOSE_HCA(dev, 0);
2227 
2228 err_free_icm:
2229 	if (!mlx4_is_slave(dev))
2230 		mlx4_free_icms(dev);
2231 
2232 	return err;
2233 }
2234 
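/* The counter bitmap is sized up to a power of two; the round-up slack
 * plus the top index (the sink counter) are reserved.
 */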
2235 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2236 {
2237 	struct mlx4_priv *priv = mlx4_priv(dev);
2238 	int nent_pow2;
2239 
2240 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2241 		return -ENOENT;
2242 
2243 	if (!dev->caps.max_counters)
2244 		return -ENOSPC;
2245 
2246 	nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2247 	/* reserve last counter index for sink counter */
2248 	return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2249 				nent_pow2 - 1, 0,
2250 				nent_pow2 - dev->caps.max_counters + 1);
2251 }
2252 
2253 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2254 {
2255 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2256 		return;
2257 
2258 	if (!dev->caps.max_counters)
2259 		return;
2260 
2261 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2262 }
2263 
2264 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2265 {
2266 	struct mlx4_priv *priv = mlx4_priv(dev);
2267 	int port;
2268 
2269 	for (port = 0; port < dev->caps.num_ports; port++)
2270 		if (priv->def_counter[port] != -1)
			mlx4_counter_free(dev, priv->def_counter[port]);
2272 }
2273 
2274 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2275 {
2276 	struct mlx4_priv *priv = mlx4_priv(dev);
2277 	int port, err = 0;
2278 	u32 idx;
2279 
2280 	for (port = 0; port < dev->caps.num_ports; port++)
2281 		priv->def_counter[port] = -1;
2282 
2283 	for (port = 0; port < dev->caps.num_ports; port++) {
2284 		err = mlx4_counter_alloc(dev, &idx);
2285 
2286 		if (!err || err == -ENOSPC) {
2287 			priv->def_counter[port] = idx;
2288 		} else if (err == -ENOENT) {
2289 			err = 0;
2290 			continue;
2291 		} else {
2292 			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2293 				 __func__, port + 1, err);
2294 			mlx4_cleanup_default_counters(dev);
2295 			return err;
2296 		}
2297 
2298 		mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2299 			 __func__, priv->def_counter[port], port + 1);
2300 	}
2301 
2302 	return err;
2303 }
2304 
2305 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2306 {
2307 	struct mlx4_priv *priv = mlx4_priv(dev);
2308 
2309 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2310 		return -ENOENT;
2311 
2312 	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
2313 	if (*idx == -1) {
2314 		*idx = MLX4_SINK_COUNTER_INDEX(dev);
2315 		return -ENOSPC;
2316 	}
2317 
2318 	return 0;
2319 }
2320 
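/* On multi-function devices the allocation goes through the ALLOC_RES
 * command wrapper so that the PF arbitrates the counter space; otherwise
 * allocate directly from the local bitmap.
 */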
2321 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2322 {
2323 	u64 out_param;
2324 	int err;
2325 
2326 	if (mlx4_is_mfunc(dev)) {
2327 		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
2328 				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2329 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2330 		if (!err)
2331 			*idx = get_param_l(&out_param);
2332 
2333 		return err;
2334 	}
2335 	return __mlx4_counter_alloc(dev, idx);
2336 }
2337 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2338 
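/* Clear a counter's IF statistics by issuing QUERY_IF_STAT with the reset
 * bit set in the input modifier.
 */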
2339 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2340 				u8 counter_index)
2341 {
2342 	struct mlx4_cmd_mailbox *if_stat_mailbox;
2343 	int err;
2344 	u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
2345 
2346 	if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2347 	if (IS_ERR(if_stat_mailbox))
2348 		return PTR_ERR(if_stat_mailbox);
2349 
2350 	err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2351 			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2352 			   MLX4_CMD_NATIVE);
2353 
2354 	mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2355 	return err;
2356 }
2357 
2358 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2359 {
2360 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2361 		return;
2362 
2363 	if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2364 		return;
2365 
2366 	__mlx4_clear_if_stat(dev, idx);
2367 
2368 	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2370 }
2371 
2372 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2373 {
2374 	u64 in_param = 0;
2375 
2376 	if (mlx4_is_mfunc(dev)) {
2377 		set_param_l(&in_param, idx);
2378 		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2379 			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2380 			 MLX4_CMD_WRAPPED);
2381 		return;
2382 	}
2383 	__mlx4_counter_free(dev, idx);
2384 }
2385 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2386 
2387 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2388 {
2389 	struct mlx4_priv *priv = mlx4_priv(dev);
2390 
2391 	return priv->def_counter[port - 1];
2392 }
2393 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2394 
2395 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2396 {
2397 	struct mlx4_priv *priv = mlx4_priv(dev);
2398 
2399 	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2400 }
2401 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2402 
2403 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2404 {
2405 	struct mlx4_priv *priv = mlx4_priv(dev);
2406 
2407 	return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2408 }
2409 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2410 
2411 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2412 {
2413 	struct mlx4_priv *priv = mlx4_priv(dev);
2414 	__be64 guid;
2415 
	/* entry 0 holds the HW-assigned GUID; don't overwrite it */
2417 	if (entry == 0)
2418 		return;
2419 
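	/* Make the random value a valid unicast EUI-64: clear the group
	 * (multicast) bit and set the locally-administered bit.
	 */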
2420 	get_random_bytes((char *)&guid, sizeof(guid));
2421 	guid &= ~(cpu_to_be64(1ULL << 56));
2422 	guid |= cpu_to_be64(1ULL << 57);
2423 	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2424 }
2425 
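/* Bring up the software resource tables (UAR, PD, XRCD, MR, MCG, EQ, CQ,
 * SRQ, QP, counters), switch the command interface to event mode, and
 * configure each port.
 */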
2426 static int mlx4_setup_hca(struct mlx4_dev *dev)
2427 {
2428 	struct mlx4_priv *priv = mlx4_priv(dev);
2429 	int err;
2430 	int port;
2431 	__be32 ib_port_default_caps;
2432 
2433 	err = mlx4_init_uar_table(dev);
2434 	if (err) {
2435 		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
2437 	}
2438 
2439 	err = mlx4_uar_alloc(dev, &priv->driver_uar);
2440 	if (err) {
2441 		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2442 		goto err_uar_table_free;
2443 	}
2444 
2445 	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2446 	if (!priv->kar) {
2447 		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2448 		err = -ENOMEM;
2449 		goto err_uar_free;
2450 	}
2451 
2452 	err = mlx4_init_pd_table(dev);
2453 	if (err) {
2454 		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2455 		goto err_kar_unmap;
2456 	}
2457 
2458 	err = mlx4_init_xrcd_table(dev);
2459 	if (err) {
2460 		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2461 		goto err_pd_table_free;
2462 	}
2463 
2464 	err = mlx4_init_mr_table(dev);
2465 	if (err) {
2466 		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2467 		goto err_xrcd_table_free;
2468 	}
2469 
2470 	if (!mlx4_is_slave(dev)) {
2471 		err = mlx4_init_mcg_table(dev);
2472 		if (err) {
2473 			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2474 			goto err_mr_table_free;
2475 		}
2476 		err = mlx4_config_mad_demux(dev);
2477 		if (err) {
2478 			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2479 			goto err_mcg_table_free;
2480 		}
2481 	}
2482 
2483 	err = mlx4_init_eq_table(dev);
2484 	if (err) {
2485 		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2486 		goto err_mcg_table_free;
2487 	}
2488 
2489 	err = mlx4_cmd_use_events(dev);
2490 	if (err) {
2491 		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2492 		goto err_eq_table_free;
2493 	}
2494 
2495 	err = mlx4_NOP(dev);
2496 	if (err) {
2497 		if (dev->flags & MLX4_FLAG_MSI_X) {
2498 			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
2499 				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2500 			mlx4_warn(dev, "Trying again without MSI-X\n");
2501 		} else {
2502 			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2503 				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2504 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2505 		}
2506 
2507 		goto err_cmd_poll;
2508 	}
2509 
2510 	mlx4_dbg(dev, "NOP command IRQ test passed\n");
2511 
2512 	err = mlx4_init_cq_table(dev);
2513 	if (err) {
2514 		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2515 		goto err_cmd_poll;
2516 	}
2517 
2518 	err = mlx4_init_srq_table(dev);
2519 	if (err) {
2520 		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2521 		goto err_cq_table_free;
2522 	}
2523 
2524 	err = mlx4_init_qp_table(dev);
2525 	if (err) {
2526 		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2527 		goto err_srq_table_free;
2528 	}
2529 
2530 	if (!mlx4_is_slave(dev)) {
2531 		err = mlx4_init_counters_table(dev);
2532 		if (err && err != -ENOENT) {
2533 			mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2534 			goto err_qp_table_free;
2535 		}
2536 	}
2537 
2538 	err = mlx4_allocate_default_counters(dev);
2539 	if (err) {
2540 		mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2541 		goto err_counters_table_free;
2542 	}
2543 
2544 	if (!mlx4_is_slave(dev)) {
2545 		for (port = 1; port <= dev->caps.num_ports; port++) {
2546 			ib_port_default_caps = 0;
2547 			err = mlx4_get_port_ib_caps(dev, port,
2548 						    &ib_port_default_caps);
2549 			if (err)
2550 				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2551 					  port, err);
2552 			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2553 
2554 			/* initialize per-slave default ib port capabilities */
2555 			if (mlx4_is_master(dev)) {
2556 				int i;
2557 				for (i = 0; i < dev->num_slaves; i++) {
2558 					if (i == mlx4_master_func_num(dev))
2559 						continue;
2560 					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2561 						ib_port_default_caps;
2562 				}
2563 			}
2564 
2565 			if (mlx4_is_mfunc(dev))
2566 				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2567 			else
2568 				dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2569 
2570 			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2571 					    dev->caps.pkey_table_len[port] : -1);
2572 			if (err) {
2573 				mlx4_err(dev, "Failed to set port %d, aborting\n",
2574 					 port);
				goto err_default_counters_free;
2576 			}
2577 		}
2578 	}
2579 
2580 	return 0;
2581 
err_default_counters_free:
2583 	mlx4_cleanup_default_counters(dev);
2584 
2585 err_counters_table_free:
2586 	if (!mlx4_is_slave(dev))
2587 		mlx4_cleanup_counters_table(dev);
2588 
2589 err_qp_table_free:
2590 	mlx4_cleanup_qp_table(dev);
2591 
2592 err_srq_table_free:
2593 	mlx4_cleanup_srq_table(dev);
2594 
2595 err_cq_table_free:
2596 	mlx4_cleanup_cq_table(dev);
2597 
2598 err_cmd_poll:
2599 	mlx4_cmd_use_polling(dev);
2600 
2601 err_eq_table_free:
2602 	mlx4_cleanup_eq_table(dev);
2603 
2604 err_mcg_table_free:
2605 	if (!mlx4_is_slave(dev))
2606 		mlx4_cleanup_mcg_table(dev);
2607 
2608 err_mr_table_free:
2609 	mlx4_cleanup_mr_table(dev);
2610 
2611 err_xrcd_table_free:
2612 	mlx4_cleanup_xrcd_table(dev);
2613 
2614 err_pd_table_free:
2615 	mlx4_cleanup_pd_table(dev);
2616 
2617 err_kar_unmap:
2618 	iounmap(priv->kar);
2619 
2620 err_uar_free:
2621 	mlx4_uar_free(dev, &priv->driver_uar);
2622 
2623 err_uar_table_free:
2624 	mlx4_cleanup_uar_table(dev);
2625 	return err;
2626 }
2627 
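/* Set a completion EQ's IRQ affinity hint: the CPU number is the EQ's
 * index within its port, skipping over the async EQ slot. A negative
 * result means the EQs are shared and this call came from the second port.
 */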
2628 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2629 {
2630 	int requested_cpu = 0;
2631 	struct mlx4_priv *priv = mlx4_priv(dev);
2632 	struct mlx4_eq *eq;
2633 	int off = 0;
2634 	int i;
2635 
2636 	if (eqn > dev->caps.num_comp_vectors)
2637 		return -EINVAL;
2638 
2639 	for (i = 1; i < port; i++)
2640 		off += mlx4_get_eqs_per_port(dev, i);
2641 
2642 	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
2643 
2644 	/* Meaning EQs are shared, and this call comes from the second port */
2645 	if (requested_cpu < 0)
2646 		return 0;
2647 
2648 	eq = &priv->eq_table.eq[eqn];
2649 
2650 	if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
2651 		return -ENOMEM;
2652 
2653 	cpumask_set_cpu(requested_cpu, eq->affinity_mask);
2654 
2655 	return 0;
2656 }
2657 
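/* Try to enable MSI-X with one completion vector per port per online CPU
 * plus one vector for the async EQ, clamped to the EQs the firmware leaves
 * us; fall back to a single shared legacy interrupt on failure.
 */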
2658 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2659 {
2660 	struct mlx4_priv *priv = mlx4_priv(dev);
2661 	struct msix_entry *entries;
2662 	int i;
2663 	int port = 0;
2664 
2665 	if (msi_x) {
2666 		int nreq = dev->caps.num_ports * num_online_cpus() + 1;
2667 
2668 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
2669 			     nreq);
2670 
		entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
2672 		if (!entries)
2673 			goto no_msi;
2674 
2675 		for (i = 0; i < nreq; ++i)
2676 			entries[i].entry = i;
2677 
2678 		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2679 					     nreq);
2680 
2681 		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
2682 			kfree(entries);
2683 			goto no_msi;
2684 		}
		/* 1 vector is reserved for events (the asynchronous EQ) */
2686 		dev->caps.num_comp_vectors = nreq - 1;
2687 
2688 		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2689 		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2690 			    dev->caps.num_ports);
2691 
2692 		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2693 			if (i == MLX4_EQ_ASYNC)
2694 				continue;
2695 
2696 			priv->eq_table.eq[i].irq =
2697 				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2698 
2699 			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2700 				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2701 					    dev->caps.num_ports);
2702 				/* We don't set affinity hint when there
2703 				 * aren't enough EQs
2704 				 */
2705 			} else {
2706 				set_bit(port,
2707 					priv->eq_table.eq[i].actv_ports.ports);
2708 				if (mlx4_init_affinity_hint(dev, port + 1, i))
2709 					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
2710 						  i);
2711 			}
			/* We divide the EQs evenly between the ports.
			 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
			 * is the number of EQs per port (i.e. eqs_per_port).
			 * Ideally we would write (i + 1) % eqs_per_port == 0;
			 * however, since there's an asynchronous EQ, we have
			 * to skip over it by comparing this condition to
			 * !!((i + 1) > MLX4_EQ_ASYNC).
			 */
2721 			if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
2722 			    ((i + 1) %
2723 			     (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
2724 			    !!((i + 1) > MLX4_EQ_ASYNC))
2725 				/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
2726 				 * everything is shared anyway.
2727 				 */
2728 				port++;
2729 		}
2730 
2731 		dev->flags |= MLX4_FLAG_MSI_X;
2732 
2733 		kfree(entries);
2734 		return;
2735 	}
2736 
2737 no_msi:
2738 	dev->caps.num_comp_vectors = 1;
2739 
2740 	BUG_ON(MLX4_EQ_ASYNC >= 2);
2741 	for (i = 0; i < 2; ++i) {
2742 		priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
2743 		if (i != MLX4_EQ_ASYNC) {
2744 			bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2745 				    dev->caps.num_ports);
2746 		}
2747 	}
2748 }
2749 
2750 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2751 {
2752 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
2753 	int err = 0;
2754 
2755 	info->dev = dev;
2756 	info->port = port;
2757 	if (!mlx4_is_slave(dev)) {
2758 		mlx4_init_mac_table(dev, &info->mac_table);
2759 		mlx4_init_vlan_table(dev, &info->vlan_table);
2760 		mlx4_init_roce_gid_table(dev, &info->gid_table);
2761 		info->base_qpn = mlx4_get_base_qpn(dev, port);
2762 	}
2763 
2764 	sprintf(info->dev_name, "mlx4_port%d", port);
2765 	info->port_attr.attr.name = info->dev_name;
2766 	if (mlx4_is_mfunc(dev))
2767 		info->port_attr.attr.mode = S_IRUGO;
2768 	else {
2769 		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2770 		info->port_attr.store     = set_port_type;
2771 	}
2772 	info->port_attr.show      = show_port_type;
2773 	sysfs_attr_init(&info->port_attr.attr);
2774 
2775 	err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
2776 	if (err) {
2777 		mlx4_err(dev, "Failed to create file for port %d\n", port);
2778 		info->port = -1;
2779 	}
2780 
2781 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2782 	info->port_mtu_attr.attr.name = info->dev_mtu_name;
2783 	if (mlx4_is_mfunc(dev))
2784 		info->port_mtu_attr.attr.mode = S_IRUGO;
2785 	else {
2786 		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2787 		info->port_mtu_attr.store     = set_port_ib_mtu;
2788 	}
2789 	info->port_mtu_attr.show      = show_port_ib_mtu;
2790 	sysfs_attr_init(&info->port_mtu_attr.attr);
2791 
2792 	err = device_create_file(&dev->persist->pdev->dev,
2793 				 &info->port_mtu_attr);
2794 	if (err) {
2795 		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2796 		device_remove_file(&info->dev->persist->pdev->dev,
2797 				   &info->port_attr);
2798 		info->port = -1;
2799 	}
2800 
2801 	return err;
2802 }
2803 
2804 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2805 {
2806 	if (info->port < 0)
2807 		return;
2808 
2809 	device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
2810 	device_remove_file(&info->dev->persist->pdev->dev,
2811 			   &info->port_mtu_attr);
2812 #ifdef CONFIG_RFS_ACCEL
2813 	free_irq_cpu_rmap(info->rmap);
2814 	info->rmap = NULL;
2815 #endif
2816 }
2817 
2818 static int mlx4_init_steering(struct mlx4_dev *dev)
2819 {
2820 	struct mlx4_priv *priv = mlx4_priv(dev);
2821 	int num_entries = dev->caps.num_ports;
2822 	int i, j;
2823 
	priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
			      GFP_KERNEL);
2825 	if (!priv->steer)
2826 		return -ENOMEM;
2827 
2828 	for (i = 0; i < num_entries; i++)
2829 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
2830 			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
2831 			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
2832 		}
2833 	return 0;
2834 }
2835 
2836 static void mlx4_clear_steering(struct mlx4_dev *dev)
2837 {
2838 	struct mlx4_priv *priv = mlx4_priv(dev);
2839 	struct mlx4_steer_index *entry, *tmp_entry;
2840 	struct mlx4_promisc_qp *pqp, *tmp_pqp;
2841 	int num_entries = dev->caps.num_ports;
2842 	int i, j;
2843 
2844 	for (i = 0; i < num_entries; i++) {
2845 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
2846 			list_for_each_entry_safe(pqp, tmp_pqp,
2847 						 &priv->steer[i].promisc_qps[j],
2848 						 list) {
2849 				list_del(&pqp->list);
2850 				kfree(pqp);
2851 			}
2852 			list_for_each_entry_safe(entry, tmp_entry,
2853 						 &priv->steer[i].steer_entries[j],
2854 						 list) {
2855 				list_del(&entry->list);
2856 				list_for_each_entry_safe(pqp, tmp_pqp,
2857 							 &entry->duplicates,
2858 							 list) {
2859 					list_del(&pqp->list);
2860 					kfree(pqp);
2861 				}
2862 				kfree(entry);
2863 			}
2864 		}
2865 	}
2866 	kfree(priv->steer);
2867 }
2868 
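/* Flatten (slot, function) into a single function index (eight functions
 * per slot), used to match a VF's position against the probe_vf quotas.
 */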
2869 static int extended_func_num(struct pci_dev *pdev)
2870 {
2871 	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
2872 }
2873 
2874 #define MLX4_OWNER_BASE	0x8069c
2875 #define MLX4_OWNER_SIZE	4
2876 
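/* A single dword at MLX4_OWNER_BASE in BAR 0 acts as a device ownership
 * semaphore: a nonzero read means another function already owns the HCA.
 */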
2877 static int mlx4_get_ownership(struct mlx4_dev *dev)
2878 {
2879 	void __iomem *owner;
2880 	u32 ret;
2881 
2882 	if (pci_channel_offline(dev->persist->pdev))
2883 		return -EIO;
2884 
2885 	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2886 			MLX4_OWNER_BASE,
2887 			MLX4_OWNER_SIZE);
2888 	if (!owner) {
2889 		mlx4_err(dev, "Failed to obtain ownership bit\n");
2890 		return -ENOMEM;
2891 	}
2892 
2893 	ret = readl(owner);
2894 	iounmap(owner);
2895 	return (int) !!ret;
2896 }
2897 
2898 static void mlx4_free_ownership(struct mlx4_dev *dev)
2899 {
2900 	void __iomem *owner;
2901 
2902 	if (pci_channel_offline(dev->persist->pdev))
2903 		return;
2904 
2905 	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2906 			MLX4_OWNER_BASE,
2907 			MLX4_OWNER_SIZE);
2908 	if (!owner) {
2909 		mlx4_err(dev, "Failed to obtain ownership bit\n");
2910 		return;
2911 	}
2912 	writel(0, owner);
2913 	msleep(1000);
2914 	iounmap(owner);
2915 }
2916 
2917 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV)	==\
2918 				  !!((flags) & MLX4_FLAG_MASTER))
2919 
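/* Enable SR-IOV and allocate the per-VF bookkeeping array, returning the
 * updated device flags; on failure the MASTER flag is stripped so the
 * caller continues without SR-IOV. In the reset flow SR-IOV is already
 * enabled, so only the array is reallocated.
 */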
2920 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
2921 			     u8 total_vfs, int existing_vfs, int reset_flow)
2922 {
2923 	u64 dev_flags = dev->flags;
2924 	int err = 0;
2925 	int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
2926 					MLX4_MAX_NUM_VF);
2927 
2928 	if (reset_flow) {
2929 		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
2930 				       GFP_KERNEL);
2931 		if (!dev->dev_vfs)
2932 			goto free_mem;
2933 		return dev_flags;
2934 	}
2935 
2936 	atomic_inc(&pf_loading);
2937 	if (dev->flags &  MLX4_FLAG_SRIOV) {
2938 		if (existing_vfs != total_vfs) {
2939 			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
2940 				 existing_vfs, total_vfs);
2941 			total_vfs = existing_vfs;
2942 		}
2943 	}
2944 
	dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (!dev->dev_vfs) {
2947 		mlx4_err(dev, "Failed to allocate memory for VFs\n");
2948 		goto disable_sriov;
2949 	}
2950 
2951 	if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
2952 		if (total_vfs > fw_enabled_sriov_vfs) {
2953 			mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
2954 				 total_vfs, fw_enabled_sriov_vfs);
2955 			err = -ENOMEM;
2956 			goto disable_sriov;
2957 		}
2958 		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
2959 		err = pci_enable_sriov(pdev, total_vfs);
2960 	}
2961 	if (err) {
2962 		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
2963 			 err);
2964 		goto disable_sriov;
2965 	} else {
2966 		mlx4_warn(dev, "Running in master mode\n");
2967 		dev_flags |= MLX4_FLAG_SRIOV |
2968 			MLX4_FLAG_MASTER;
2969 		dev_flags &= ~MLX4_FLAG_SLAVE;
2970 		dev->persist->num_vfs = total_vfs;
2971 	}
2972 	return dev_flags;
2973 
2974 disable_sriov:
2975 	atomic_dec(&pf_loading);
2976 free_mem:
2977 	dev->persist->num_vfs = 0;
2978 	kfree(dev->dev_vfs);
	dev->dev_vfs = NULL;
2980 	return dev_flags & ~MLX4_FLAG_MASTER;
2981 }
2982 
2983 enum {
2984 	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
2985 };
2986 
2987 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
2988 			      int *nvfs)
2989 {
2990 	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
2991 	/* Checking for 64 VFs as a limitation of CX2 */
2992 	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
2993 	    requested_vfs >= 64) {
2994 		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
2995 			 requested_vfs);
2996 		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
2997 	}
2998 	return 0;
2999 }
3000 
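/* Core bring-up path, used both at probe time and on reset/recovery
 * (reset_flow): claim ownership, reset the HCA, initialize FW and HCA,
 * enable SR-IOV as requested, and set up all resources. A PF that turns
 * out not to be the primary function retries as a slave via slave_start.
 */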
3001 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3002 			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3003 			 int reset_flow)
3004 {
3005 	struct mlx4_dev *dev;
3006 	unsigned sum = 0;
3007 	int err;
3008 	int port;
3009 	int i;
3010 	struct mlx4_dev_cap *dev_cap = NULL;
3011 	int existing_vfs = 0;
3012 
3013 	dev = &priv->dev;
3014 
3015 	INIT_LIST_HEAD(&priv->ctx_list);
3016 	spin_lock_init(&priv->ctx_lock);
3017 
3018 	mutex_init(&priv->port_mutex);
3019 	mutex_init(&priv->bond_mutex);
3020 
3021 	INIT_LIST_HEAD(&priv->pgdir_list);
3022 	mutex_init(&priv->pgdir_mutex);
3023 
3024 	INIT_LIST_HEAD(&priv->bf_list);
3025 	mutex_init(&priv->bf_mutex);
3026 
3027 	dev->rev_id = pdev->revision;
3028 	dev->numa_node = dev_to_node(&pdev->dev);
3029 
3030 	/* Detect if this device is a virtual function */
3031 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3032 		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3033 		dev->flags |= MLX4_FLAG_SLAVE;
3034 	} else {
		/* We reset the device and enable SR-IOV only for physical
		 * devices. Try to claim ownership of the device; if it is
		 * already taken, skip -- do not allow multiple PFs.
		 */
3038 		err = mlx4_get_ownership(dev);
3039 		if (err) {
3040 			if (err < 0)
3041 				return err;
3042 			else {
3043 				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3044 				return -EINVAL;
3045 			}
3046 		}
3047 
3048 		atomic_set(&priv->opreq_count, 0);
3049 		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3050 
3051 		/*
3052 		 * Now reset the HCA before we touch the PCI capabilities or
3053 		 * attempt a firmware command, since a boot ROM may have left
3054 		 * the HCA in an undefined state.
3055 		 */
3056 		err = mlx4_reset(dev);
3057 		if (err) {
3058 			mlx4_err(dev, "Failed to reset HCA, aborting\n");
3059 			goto err_sriov;
3060 		}
3061 
3062 		if (total_vfs) {
3063 			dev->flags = MLX4_FLAG_MASTER;
3064 			existing_vfs = pci_num_vf(pdev);
3065 			if (existing_vfs)
3066 				dev->flags |= MLX4_FLAG_SRIOV;
3067 			dev->persist->num_vfs = total_vfs;
3068 		}
3069 	}
3070 
	/* On load, remove any previous indication of internal error;
	 * the device is up.
	 */
3074 	dev->persist->state = MLX4_DEVICE_STATE_UP;
3075 
3076 slave_start:
3077 	err = mlx4_cmd_init(dev);
3078 	if (err) {
3079 		mlx4_err(dev, "Failed to init command interface, aborting\n");
3080 		goto err_sriov;
3081 	}
3082 
	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca.
	 */
3086 	if (mlx4_is_mfunc(dev)) {
3087 		if (mlx4_is_master(dev)) {
3088 			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3089 
3090 		} else {
3091 			dev->num_slaves = 0;
3092 			err = mlx4_multi_func_init(dev);
3093 			if (err) {
3094 				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3095 				goto err_cmd;
3096 			}
3097 		}
3098 	}
3099 
3100 	err = mlx4_init_fw(dev);
3101 	if (err) {
3102 		mlx4_err(dev, "Failed to init fw, aborting.\n");
3103 		goto err_mfunc;
3104 	}
3105 
3106 	if (mlx4_is_master(dev)) {
		/* When we hit the goto slave_start below, dev_cap is already initialized */
3108 		if (!dev_cap) {
3109 			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3110 
3111 			if (!dev_cap) {
3112 				err = -ENOMEM;
3113 				goto err_fw;
3114 			}
3115 
3116 			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3117 			if (err) {
3118 				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3119 				goto err_fw;
3120 			}
3121 
3122 			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3123 				goto err_fw;
3124 
3125 			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3126 				u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3127 								  total_vfs,
3128 								  existing_vfs,
3129 								  reset_flow);
3130 
3131 				mlx4_close_fw(dev);
3132 				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3133 				dev->flags = dev_flags;
3134 				if (!SRIOV_VALID_STATE(dev->flags)) {
3135 					mlx4_err(dev, "Invalid SRIOV state\n");
3136 					goto err_sriov;
3137 				}
3138 				err = mlx4_reset(dev);
3139 				if (err) {
3140 					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3141 					goto err_sriov;
3142 				}
3143 				goto slave_start;
3144 			}
3145 		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since the reported max_eqs
			 * value differs when SRIOV is enabled.
			 */
3150 			memset(dev_cap, 0, sizeof(*dev_cap));
3151 			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3152 			if (err) {
3153 				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3154 				goto err_fw;
3155 			}
3156 
3157 			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3158 				goto err_fw;
3159 		}
3160 	}
3161 
3162 	err = mlx4_init_hca(dev);
3163 	if (err) {
3164 		if (err == -EACCES) {
			/* Not the primary physical function;
			 * running in slave mode. */
3167 			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3168 			/* We're not a PF */
3169 			if (dev->flags & MLX4_FLAG_SRIOV) {
3170 				if (!existing_vfs)
3171 					pci_disable_sriov(pdev);
3172 				if (mlx4_is_master(dev) && !reset_flow)
3173 					atomic_dec(&pf_loading);
3174 				dev->flags &= ~MLX4_FLAG_SRIOV;
3175 			}
3176 			if (!mlx4_is_slave(dev))
3177 				mlx4_free_ownership(dev);
3178 			dev->flags |= MLX4_FLAG_SLAVE;
3179 			dev->flags &= ~MLX4_FLAG_MASTER;
3180 			goto slave_start;
3181 		} else
3182 			goto err_fw;
3183 	}
3184 
3185 	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3186 		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3187 						  existing_vfs, reset_flow);
3188 
3189 		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3190 			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3191 			dev->flags = dev_flags;
3192 			err = mlx4_cmd_init(dev);
3193 			if (err) {
				/* Only the VHCR was cleaned up, so we can
				 * still send FW commands.
				 */
3197 				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3198 				goto err_close;
3199 			}
3200 		} else {
3201 			dev->flags = dev_flags;
3202 		}
3203 
3204 		if (!SRIOV_VALID_STATE(dev->flags)) {
3205 			mlx4_err(dev, "Invalid SRIOV state\n");
3206 			goto err_close;
3207 		}
3208 	}
3209 
	/* Check whether the device is functioning at its maximum possible
	 * speed. This call has no return code; it just warns the user if the
	 * PCI Express device capabilities are under-satisfied by the bus.
	 */
3214 	if (!mlx4_is_slave(dev))
3215 		mlx4_check_pcie_caps(dev);
3216 
3219 	if (mlx4_is_master(dev)) {
3220 		if (dev->caps.num_ports < 2 &&
3221 		    num_vfs_argc > 1) {
3222 			err = -EINVAL;
3223 			mlx4_err(dev,
3224 				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3225 				 dev->caps.num_ports);
3226 			goto err_close;
3227 		}
3228 		memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3229 
		for (i = 0; i < ARRAY_SIZE(dev->persist->nvfs); i++) {
3233 			unsigned j;
3234 
3235 			for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3236 				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3237 				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3238 					dev->caps.num_ports;
3239 			}
3240 		}
3241 
3242 		/* In master functions, the communication channel
3243 		 * must be initialized after obtaining its address from fw
3244 		 */
3245 		err = mlx4_multi_func_init(dev);
3246 		if (err) {
3247 			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3248 			goto err_close;
3249 		}
3250 	}
3251 
3252 	err = mlx4_alloc_eq_table(dev);
3253 	if (err)
3254 		goto err_master_mfunc;
3255 
3256 	bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
3257 	mutex_init(&priv->msix_ctl.pool_lock);
3258 
3259 	mlx4_enable_msi_x(dev);
3260 	if ((mlx4_is_mfunc(dev)) &&
3261 	    !(dev->flags & MLX4_FLAG_MSI_X)) {
3262 		err = -ENOSYS;
3263 		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3264 		goto err_free_eq;
3265 	}
3266 
3267 	if (!mlx4_is_slave(dev)) {
3268 		err = mlx4_init_steering(dev);
3269 		if (err)
3270 			goto err_disable_msix;
3271 	}
3272 
3273 	err = mlx4_setup_hca(dev);
3274 	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3275 	    !mlx4_is_mfunc(dev)) {
3276 		dev->flags &= ~MLX4_FLAG_MSI_X;
3277 		dev->caps.num_comp_vectors = 1;
3278 		pci_disable_msix(pdev);
3279 		err = mlx4_setup_hca(dev);
3280 	}
3281 
3282 	if (err)
3283 		goto err_steer;
3284 
3285 	mlx4_init_quotas(dev);
	/* When the PF's resources are ready, arm its comm channel to
	 * enable receiving commands.
	 */
3289 	if (mlx4_is_master(dev)) {
3290 		err = mlx4_ARM_COMM_CHANNEL(dev);
3291 		if (err) {
3292 			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
3293 				 err);
3294 			goto err_steer;
3295 		}
3296 	}
3297 
3298 	for (port = 1; port <= dev->caps.num_ports; port++) {
3299 		err = mlx4_init_port_info(dev, port);
3300 		if (err)
3301 			goto err_port;
3302 	}
3303 
3304 	priv->v2p.port1 = 1;
3305 	priv->v2p.port2 = 2;
3306 
3307 	err = mlx4_register_device(dev);
3308 	if (err)
3309 		goto err_port;
3310 
3311 	mlx4_request_modules(dev);
3312 
3313 	mlx4_sense_init(dev);
3314 	mlx4_start_sense(dev);
3315 
3316 	priv->removed = 0;
3317 
3318 	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3319 		atomic_dec(&pf_loading);
3320 
3321 	kfree(dev_cap);
3322 	return 0;
3323 
3324 err_port:
3325 	for (--port; port >= 1; --port)
3326 		mlx4_cleanup_port_info(&priv->port[port]);
3327 
3328 	mlx4_cleanup_default_counters(dev);
3329 	if (!mlx4_is_slave(dev))
3330 		mlx4_cleanup_counters_table(dev);
3331 	mlx4_cleanup_qp_table(dev);
3332 	mlx4_cleanup_srq_table(dev);
3333 	mlx4_cleanup_cq_table(dev);
3334 	mlx4_cmd_use_polling(dev);
3335 	mlx4_cleanup_eq_table(dev);
3336 	mlx4_cleanup_mcg_table(dev);
3337 	mlx4_cleanup_mr_table(dev);
3338 	mlx4_cleanup_xrcd_table(dev);
3339 	mlx4_cleanup_pd_table(dev);
3340 	mlx4_cleanup_uar_table(dev);
3341 
3342 err_steer:
3343 	if (!mlx4_is_slave(dev))
3344 		mlx4_clear_steering(dev);
3345 
3346 err_disable_msix:
3347 	if (dev->flags & MLX4_FLAG_MSI_X)
3348 		pci_disable_msix(pdev);
3349 
3350 err_free_eq:
3351 	mlx4_free_eq_table(dev);
3352 
3353 err_master_mfunc:
3354 	if (mlx4_is_master(dev)) {
3355 		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3356 		mlx4_multi_func_cleanup(dev);
3357 	}
3358 
3359 	if (mlx4_is_slave(dev)) {
3360 		kfree(dev->caps.qp0_qkey);
3361 		kfree(dev->caps.qp0_tunnel);
3362 		kfree(dev->caps.qp0_proxy);
3363 		kfree(dev->caps.qp1_tunnel);
3364 		kfree(dev->caps.qp1_proxy);
3365 	}
3366 
3367 err_close:
3368 	mlx4_close_hca(dev);
3369 
3370 err_fw:
3371 	mlx4_close_fw(dev);
3372 
3373 err_mfunc:
3374 	if (mlx4_is_slave(dev))
3375 		mlx4_multi_func_cleanup(dev);
3376 
3377 err_cmd:
3378 	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3379 
3380 err_sriov:
3381 	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3382 		pci_disable_sriov(pdev);
3383 		dev->flags &= ~MLX4_FLAG_SRIOV;
3384 	}
3385 
3386 	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3387 		atomic_dec(&pf_loading);
3388 
3389 	kfree(priv->dev.dev_vfs);
3390 
3391 	if (!mlx4_is_slave(dev))
3392 		mlx4_free_ownership(dev);
3393 
3394 	kfree(dev_cap);
3395 	return err;
3396 }
3397 
3398 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3399 			   struct mlx4_priv *priv)
3400 {
3401 	int err;
3402 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3403 	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3404 	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3405 		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
3406 	unsigned total_vfs = 0;
3407 	unsigned int i;
3408 
3409 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3410 
3411 	err = pci_enable_device(pdev);
3412 	if (err) {
3413 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3414 		return err;
3415 	}
3416 
	/* Due to the requirement that all VFs and the PF are *guaranteed* two
	 * MACs per port, we must limit the number of VFs to 63 (since there
	 * are 128 MACs).
	 */
3421 	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
3422 	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3423 		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3424 		if (nvfs[i] < 0) {
3425 			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3426 			err = -EINVAL;
3427 			goto err_disable_pdev;
3428 		}
3429 	}
3430 	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
3431 	     i++) {
3432 		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3433 		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3434 			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3435 			err = -EINVAL;
3436 			goto err_disable_pdev;
3437 		}
3438 	}
3439 	if (total_vfs > MLX4_MAX_NUM_VF) {
3440 		dev_err(&pdev->dev,
3441 			"Requested more VF's (%d) than allowed by hw (%d)\n",
3442 			total_vfs, MLX4_MAX_NUM_VF);
3443 		err = -EINVAL;
3444 		goto err_disable_pdev;
3445 	}
3446 
3447 	for (i = 0; i < MLX4_MAX_PORTS; i++) {
3448 		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
3449 			dev_err(&pdev->dev,
3450 				"Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
3451 				nvfs[i] + nvfs[2], i + 1,
3452 				MLX4_MAX_NUM_VF_P_PORT);
3453 			err = -EINVAL;
3454 			goto err_disable_pdev;
3455 		}
3456 	}
3457 
3458 	/* Check for BARs. */
3459 	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3460 	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3461 		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3462 			pci_dev_data, pci_resource_flags(pdev, 0));
3463 		err = -ENODEV;
3464 		goto err_disable_pdev;
3465 	}
3466 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3467 		dev_err(&pdev->dev, "Missing UAR, aborting\n");
3468 		err = -ENODEV;
3469 		goto err_disable_pdev;
3470 	}
3471 
3472 	err = pci_request_regions(pdev, DRV_NAME);
3473 	if (err) {
3474 		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3475 		goto err_disable_pdev;
3476 	}
3477 
3478 	pci_set_master(pdev);
3479 
3480 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3481 	if (err) {
3482 		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3483 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3484 		if (err) {
3485 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3486 			goto err_release_regions;
3487 		}
3488 	}
3489 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3490 	if (err) {
3491 		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
3492 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3493 		if (err) {
3494 			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
3495 			goto err_release_regions;
3496 		}
3497 	}
3498 
3499 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
3500 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3501 	/* Detect if this device is a virtual function */
3502 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as PF, we normally skip VFs unless explicitly
		 * requested to probe them.
		 */
3506 		if (total_vfs) {
3507 			unsigned vfs_offset = 0;
3508 
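			/* VF function numbers are assumed to be handed out
			 * in nvfs slot order: port1-only VFs first, then
			 * port2-only, then dual-port.  Find the group this
			 * VF falls into, and probe it from the PF only if
			 * its index within the group is covered by the
			 * matching prb_vf value.
			 */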
			for (i = 0; i < ARRAY_SIZE(nvfs) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == ARRAY_SIZE(nvfs)) {
3514 				err = -ENODEV;
3515 				goto err_release_regions;
3516 			}
3517 			if ((extended_func_num(pdev) - vfs_offset)
3518 			    > prb_vf[i]) {
3519 				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
3520 					 extended_func_num(pdev));
3521 				err = -ENODEV;
3522 				goto err_release_regions;
3523 			}
3524 		}
3525 	}
3526 
3527 	err = mlx4_catas_init(&priv->dev);
3528 	if (err)
3529 		goto err_release_regions;
3530 
3531 	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3532 	if (err)
3533 		goto err_catas;
3534 
3535 	return 0;
3536 
3537 err_catas:
3538 	mlx4_catas_end(&priv->dev);
3539 
3540 err_release_regions:
3541 	pci_release_regions(pdev);
3542 
3543 err_disable_pdev:
3544 	pci_disable_device(pdev);
3545 	pci_set_drvdata(pdev, NULL);
3546 	return err;
3547 }
3548 
3549 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3550 {
3551 	struct mlx4_priv *priv;
3552 	struct mlx4_dev *dev;
3553 	int ret;
3554 
3555 	printk_once(KERN_INFO "%s", mlx4_version);
3556 
3557 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3558 	if (!priv)
3559 		return -ENOMEM;
3560 
3561 	dev       = &priv->dev;
3562 	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3563 	if (!dev->persist) {
3564 		kfree(priv);
3565 		return -ENOMEM;
3566 	}
3567 	dev->persist->pdev = pdev;
3568 	dev->persist->dev = dev;
3569 	pci_set_drvdata(pdev, dev->persist);
3570 	priv->pci_dev_data = id->driver_data;
3571 	mutex_init(&dev->persist->device_state_mutex);
3572 	mutex_init(&dev->persist->interface_state_mutex);
3573 
	ret = __mlx4_init_one(pdev, id->driver_data, priv);
3575 	if (ret) {
3576 		kfree(dev->persist);
3577 		kfree(priv);
3578 	} else {
3579 		pci_save_state(pdev);
3580 	}
3581 
3582 	return ret;
3583 }
3584 
3585 static void mlx4_clean_dev(struct mlx4_dev *dev)
3586 {
3587 	struct mlx4_dev_persistent *persist = dev->persist;
3588 	struct mlx4_priv *priv = mlx4_priv(dev);
3589 	unsigned long	flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
3590 
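	/* Wipe all per-instance driver state while preserving the
	 * persistent info and the device flags that must survive a reset
	 * (RESET_PERSIST_MASK_FLAGS).
	 */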
3591 	memset(priv, 0, sizeof(*priv));
3592 	priv->dev.persist = persist;
3593 	priv->dev.flags = flags;
3594 }
3595 
3596 static void mlx4_unload_one(struct pci_dev *pdev)
3597 {
3598 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3599 	struct mlx4_dev  *dev  = persist->dev;
3600 	struct mlx4_priv *priv = mlx4_priv(dev);
3601 	int               pci_dev_data;
3602 	int p, i;
3603 
3604 	if (priv->removed)
3605 		return;
3606 
	/* save the current port types so they can be restored on restart */
3608 	for (i = 0; i < dev->caps.num_ports; i++) {
3609 		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] =
			dev->caps.possible_type[i + 1];
3612 	}
3613 
3614 	pci_dev_data = priv->pci_dev_data;
3615 
3616 	mlx4_stop_sense(dev);
3617 	mlx4_unregister_device(dev);
3618 
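	/* Tear down HW resources in roughly the reverse order of setup */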
3619 	for (p = 1; p <= dev->caps.num_ports; p++) {
3620 		mlx4_cleanup_port_info(&priv->port[p]);
3621 		mlx4_CLOSE_PORT(dev, p);
3622 	}
3623 
3624 	if (mlx4_is_master(dev))
3625 		mlx4_free_resource_tracker(dev,
3626 					   RES_TR_FREE_SLAVES_ONLY);
3627 
3628 	mlx4_cleanup_default_counters(dev);
3629 	if (!mlx4_is_slave(dev))
3630 		mlx4_cleanup_counters_table(dev);
3631 	mlx4_cleanup_qp_table(dev);
3632 	mlx4_cleanup_srq_table(dev);
3633 	mlx4_cleanup_cq_table(dev);
3634 	mlx4_cmd_use_polling(dev);
3635 	mlx4_cleanup_eq_table(dev);
3636 	mlx4_cleanup_mcg_table(dev);
3637 	mlx4_cleanup_mr_table(dev);
3638 	mlx4_cleanup_xrcd_table(dev);
3639 	mlx4_cleanup_pd_table(dev);
3640 
3641 	if (mlx4_is_master(dev))
3642 		mlx4_free_resource_tracker(dev,
3643 					   RES_TR_FREE_STRUCTS_ONLY);
3644 
3645 	iounmap(priv->kar);
3646 	mlx4_uar_free(dev, &priv->driver_uar);
3647 	mlx4_cleanup_uar_table(dev);
3648 	if (!mlx4_is_slave(dev))
3649 		mlx4_clear_steering(dev);
3650 	mlx4_free_eq_table(dev);
3651 	if (mlx4_is_master(dev))
3652 		mlx4_multi_func_cleanup(dev);
3653 	mlx4_close_hca(dev);
3654 	mlx4_close_fw(dev);
3655 	if (mlx4_is_slave(dev))
3656 		mlx4_multi_func_cleanup(dev);
3657 	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3658 
3659 	if (dev->flags & MLX4_FLAG_MSI_X)
3660 		pci_disable_msix(pdev);
3661 
3662 	if (!mlx4_is_slave(dev))
3663 		mlx4_free_ownership(dev);
3664 
3665 	kfree(dev->caps.qp0_qkey);
3666 	kfree(dev->caps.qp0_tunnel);
3667 	kfree(dev->caps.qp0_proxy);
3668 	kfree(dev->caps.qp1_tunnel);
3669 	kfree(dev->caps.qp1_proxy);
3670 	kfree(dev->dev_vfs);
3671 
3672 	mlx4_clean_dev(dev);
3673 	priv->pci_dev_data = pci_dev_data;
3674 	priv->removed = 1;
3675 }
3676 
3677 static void mlx4_remove_one(struct pci_dev *pdev)
3678 {
3679 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3680 	struct mlx4_dev  *dev  = persist->dev;
3681 	struct mlx4_priv *priv = mlx4_priv(dev);
3682 	int active_vfs = 0;
3683 
3684 	mutex_lock(&persist->interface_state_mutex);
3685 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3686 	mutex_unlock(&persist->interface_state_mutex);
3687 
	/* Disabling SR-IOV is not allowed while there are active VFs */
3689 	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3690 		active_vfs = mlx4_how_many_lives_vf(dev);
3691 		if (active_vfs) {
3692 			pr_warn("Removing PF when there are active VF's !!\n");
3693 			pr_warn("Will not disable SR-IOV.\n");
3694 		}
3695 	}
3696 
	/* The device is now marked for deletion; proceed without the lock
	 * so that other tasks blocked on it can terminate.
	 */
3700 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3701 		mlx4_unload_one(pdev);
3702 	else
3703 		mlx4_info(dev, "%s: interface is down\n", __func__);
3704 	mlx4_catas_end(dev);
3705 	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3706 		mlx4_warn(dev, "Disabling SR-IOV\n");
3707 		pci_disable_sriov(pdev);
3708 	}
3709 
3710 	pci_release_regions(pdev);
3711 	pci_disable_device(pdev);
3712 	kfree(dev->persist);
3713 	kfree(priv);
3714 	pci_set_drvdata(pdev, NULL);
3715 }
3716 
3717 static int restore_current_port_types(struct mlx4_dev *dev,
3718 				      enum mlx4_port_type *types,
3719 				      enum mlx4_port_type *poss_types)
3720 {
3721 	struct mlx4_priv *priv = mlx4_priv(dev);
3722 	int err, i;
3723 
3724 	mlx4_stop_sense(dev);
3725 
3726 	mutex_lock(&priv->port_mutex);
3727 	for (i = 0; i < dev->caps.num_ports; i++)
3728 		dev->caps.possible_type[i + 1] = poss_types[i];
3729 	err = mlx4_change_port_types(dev, types);
3730 	mlx4_start_sense(dev);
3731 	mutex_unlock(&priv->port_mutex);
3732 
3733 	return err;
3734 }
3735 
3736 int mlx4_restart_one(struct pci_dev *pdev)
3737 {
3738 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3739 	struct mlx4_dev	 *dev  = persist->dev;
3740 	struct mlx4_priv *priv = mlx4_priv(dev);
3741 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3742 	int pci_dev_data, err, total_vfs;
3743 
3744 	pci_dev_data = priv->pci_dev_data;
3745 	total_vfs = dev->persist->num_vfs;
3746 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3747 
3748 	mlx4_unload_one(pdev);
3749 	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
3750 	if (err) {
3751 		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
3752 			 __func__, pci_name(pdev), err);
3753 		return err;
3754 	}
3755 
3756 	err = restore_current_port_types(dev, dev->persist->curr_port_type,
3757 					 dev->persist->curr_port_poss_type);
3758 	if (err)
3759 		mlx4_err(dev, "could not restore original port types (%d)\n",
3760 			 err);
3761 
3762 	return err;
3763 }
3764 
3765 static const struct pci_device_id mlx4_pci_table[] = {
3766 	/* MT25408 "Hermon" SDR */
3767 	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3768 	/* MT25408 "Hermon" DDR */
3769 	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3770 	/* MT25408 "Hermon" QDR */
3771 	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3772 	/* MT25408 "Hermon" DDR PCIe gen2 */
3773 	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3774 	/* MT25408 "Hermon" QDR PCIe gen2 */
3775 	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3776 	/* MT25408 "Hermon" EN 10GigE */
3777 	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3778 	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
3779 	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3780 	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
3781 	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3782 	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
3783 	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3784 	/* MT26468 ConnectX EN 10GigE PCIe gen2*/
3785 	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3786 	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
3787 	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3788 	/* MT26478 ConnectX2 40GigE PCIe gen2 */
3789 	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3790 	/* MT25400 Family [ConnectX-2 Virtual Function] */
3791 	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
3792 	/* MT27500 Family [ConnectX-3] */
3793 	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
3794 	/* MT27500 Family [ConnectX-3 Virtual Function] */
3795 	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
3796 	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
3797 	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
3798 	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
3799 	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
3800 	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
3801 	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
3802 	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
3803 	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
3804 	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
3805 	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
3806 	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
3807 	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
3808 	{ 0, }
3809 };
3810 
3811 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
3812 
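/* PCI error recovery: on a channel error, mlx4_pci_err_detected() tears the
 * device down and asks the PCI core for a slot reset (or disconnects on a
 * permanent failure); mlx4_pci_slot_reset() then re-enables the device,
 * reloads it and restores the saved port types.
 */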
3813 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
3814 					      pci_channel_state_t state)
3815 {
3816 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3817 
3818 	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
3819 	mlx4_enter_error_state(persist);
3820 
3821 	mutex_lock(&persist->interface_state_mutex);
3822 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3823 		mlx4_unload_one(pdev);
3824 
3825 	mutex_unlock(&persist->interface_state_mutex);
3826 	if (state == pci_channel_io_perm_failure)
3827 		return PCI_ERS_RESULT_DISCONNECT;
3828 
3829 	pci_disable_device(pdev);
3830 	return PCI_ERS_RESULT_NEED_RESET;
3831 }
3832 
3833 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
3834 {
3835 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3836 	struct mlx4_dev	 *dev  = persist->dev;
3837 	struct mlx4_priv *priv = mlx4_priv(dev);
3838 	int               ret;
3839 	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3840 	int total_vfs;
3841 
3842 	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
3843 	ret = pci_enable_device(pdev);
3844 	if (ret) {
3845 		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
3846 		return PCI_ERS_RESULT_DISCONNECT;
3847 	}
3848 
3849 	pci_set_master(pdev);
3850 	pci_restore_state(pdev);
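	/* re-save the PCI state so a valid saved copy remains for any
	 * later reset
	 */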
3851 	pci_save_state(pdev);
3852 
3853 	total_vfs = dev->persist->num_vfs;
3854 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3855 
3856 	mutex_lock(&persist->interface_state_mutex);
3857 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
3858 		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
3859 				    priv, 1);
3860 		if (ret) {
3861 			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
3862 				 __func__,  ret);
3863 			goto end;
3864 		}
3865 
		ret = restore_current_port_types(dev,
						 dev->persist->curr_port_type,
						 dev->persist->curr_port_poss_type);
3869 		if (ret)
3870 			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
3871 	}
3872 end:
3873 	mutex_unlock(&persist->interface_state_mutex);
3874 
3875 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
3876 }
3877 
3878 static void mlx4_shutdown(struct pci_dev *pdev)
3879 {
3880 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3881 
3882 	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
3883 	mutex_lock(&persist->interface_state_mutex);
3884 	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3885 		mlx4_unload_one(pdev);
3886 	mutex_unlock(&persist->interface_state_mutex);
3887 }
3888 
3889 static const struct pci_error_handlers mlx4_err_handler = {
3890 	.error_detected = mlx4_pci_err_detected,
3891 	.slot_reset     = mlx4_pci_slot_reset,
3892 };
3893 
3894 static struct pci_driver mlx4_driver = {
3895 	.name		= DRV_NAME,
3896 	.id_table	= mlx4_pci_table,
3897 	.probe		= mlx4_init_one,
3898 	.shutdown	= mlx4_shutdown,
3899 	.remove		= mlx4_remove_one,
3900 	.err_handler    = &mlx4_err_handler,
3901 };
3902 
3903 static int __init mlx4_verify_params(void)
3904 {
3905 	if ((log_num_mac < 0) || (log_num_mac > 7)) {
3906 		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
3907 		return -1;
3908 	}
3909 
3910 	if (log_num_vlan != 0)
3911 		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
3912 			MLX4_LOG_NUM_VLANS);
3913 
3914 	if (use_prio != 0)
3915 		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
3916 
3917 	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
3918 		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
3919 			log_mtts_per_seg);
3920 		return -1;
3921 	}
3922 
	/* Check that the port type module parameters form a legal combination */
	if (!port_type_array[0] && port_type_array[1]) {
3925 		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
3926 		port_type_array[0] = true;
3927 	}
3928 
3929 	if (mlx4_log_num_mgm_entry_size < -7 ||
3930 	    (mlx4_log_num_mgm_entry_size > 0 &&
3931 	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
3932 	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
3933 		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
3934 			mlx4_log_num_mgm_entry_size,
3935 			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
3936 			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
3937 		return -1;
3938 	}
3939 
3940 	return 0;
3941 }
3942 
3943 static int __init mlx4_init(void)
3944 {
3945 	int ret;
3946 
3947 	if (mlx4_verify_params())
		return -EINVAL;

3951 	mlx4_wq = create_singlethread_workqueue("mlx4");
3952 	if (!mlx4_wq)
3953 		return -ENOMEM;
3954 
3955 	ret = pci_register_driver(&mlx4_driver);
3956 	if (ret < 0)
3957 		destroy_workqueue(mlx4_wq);
3958 	return ret < 0 ? ret : 0;
3959 }
3960 
3961 static void __exit mlx4_cleanup(void)
3962 {
3963 	pci_unregister_driver(&mlx4_driver);
3964 	destroy_workqueue(mlx4_wq);
3965 }
3966 
3967 module_init(mlx4_init);
3968 module_exit(mlx4_cleanup);
3969