1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/io-mapping.h>
43 
44 #include <linux/mlx4/device.h>
45 #include <linux/mlx4/doorbell.h>
46 
47 #include "mlx4.h"
48 #include "fw.h"
49 #include "icm.h"
50 
51 MODULE_AUTHOR("Roland Dreier");
52 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
53 MODULE_LICENSE("Dual BSD/GPL");
54 MODULE_VERSION(DRV_VERSION);
55 
56 struct workqueue_struct *mlx4_wq;
57 
58 #ifdef CONFIG_MLX4_DEBUG
59 
int mlx4_debug_level;
61 module_param_named(debug_level, mlx4_debug_level, int, 0644);
62 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
63 
64 #endif /* CONFIG_MLX4_DEBUG */
65 
66 #ifdef CONFIG_PCI_MSI
67 
68 static int msi_x = 1;
69 module_param(msi_x, int, 0444);
70 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
71 
72 #else /* CONFIG_PCI_MSI */
73 
74 #define msi_x (0)
75 
76 #endif /* CONFIG_PCI_MSI */
77 
78 static char mlx4_version[] __devinitdata =
79 	DRV_NAME ": Mellanox ConnectX core driver v"
80 	DRV_VERSION " (" DRV_RELDATE ")\n";
81 
82 static struct mlx4_profile default_profile = {
83 	.num_qp		= 1 << 17,
84 	.num_srq	= 1 << 16,
85 	.rdmarc_per_qp	= 1 << 4,
86 	.num_cq		= 1 << 16,
87 	.num_mcg	= 1 << 13,
88 	.num_mpt	= 1 << 17,
89 	.num_mtt	= 1 << 20,
90 };
91 
92 static int log_num_mac = 2;
93 module_param_named(log_num_mac, log_num_mac, int, 0444);
94 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
95 
96 static int log_num_vlan;
97 module_param_named(log_num_vlan, log_num_vlan, int, 0444);
98 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
99 
static bool use_prio;
101 module_param_named(use_prio, use_prio, bool, 0444);
102 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
103 		  "(0/1, default 0)");
104 
105 static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
106 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
107 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
108 
109 int mlx4_check_port_params(struct mlx4_dev *dev,
110 			   enum mlx4_port_type *port_type)
111 {
112 	int i;
113 
114 	for (i = 0; i < dev->caps.num_ports - 1; i++) {
115 		if (port_type[i] != port_type[i + 1]) {
116 			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
117 				mlx4_err(dev, "Only same port types supported "
118 					 "on this HCA, aborting.\n");
119 				return -EINVAL;
120 			}
121 			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
122 			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
123 				return -EINVAL;
124 		}
125 	}
126 
127 	for (i = 0; i < dev->caps.num_ports; i++) {
128 		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
129 			mlx4_err(dev, "Requested port type for port %d is not "
130 				      "supported on this HCA\n", i + 1);
131 			return -EINVAL;
132 		}
133 	}
134 	return 0;
135 }
136 
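/*
 * Build dev->caps.port_mask with one bit set (bit port - 1) for every
 * port that is currently configured as InfiniBand.
 */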
137 static void mlx4_set_port_mask(struct mlx4_dev *dev)
138 {
139 	int i;
140 
141 	dev->caps.port_mask = 0;
142 	for (i = 1; i <= dev->caps.num_ports; ++i)
143 		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
144 			dev->caps.port_mask |= 1 << (i - 1);
145 }
146 
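/*
 * Query the device capabilities with QUERY_DEV_CAP, sanity-check them
 * against kernel and PCI limits (page size, port count, UAR BAR size),
 * and translate the firmware limits into dev->caps, taking the MAC,
 * VLAN and priority module parameters into account.
 */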
147 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
148 {
149 	int err;
150 	int i;
151 
152 	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
153 	if (err) {
154 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
155 		return err;
156 	}
157 
158 	if (dev_cap->min_page_sz > PAGE_SIZE) {
159 		mlx4_err(dev, "HCA minimum page size of %d bigger than "
160 			 "kernel PAGE_SIZE of %ld, aborting.\n",
161 			 dev_cap->min_page_sz, PAGE_SIZE);
162 		return -ENODEV;
163 	}
164 	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
165 		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
166 			 "aborting.\n",
167 			 dev_cap->num_ports, MLX4_MAX_PORTS);
168 		return -ENODEV;
169 	}
170 
171 	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
172 		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
173 			 "PCI resource 2 size of 0x%llx, aborting.\n",
174 			 dev_cap->uar_size,
175 			 (unsigned long long) pci_resource_len(dev->pdev, 2));
176 		return -ENODEV;
177 	}
178 
179 	dev->caps.num_ports	     = dev_cap->num_ports;
180 	for (i = 1; i <= dev->caps.num_ports; ++i) {
181 		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
182 		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
183 		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
184 		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
185 		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
186 		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
187 		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
188 		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
189 		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
190 		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
191 		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
192 		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
193 	}
194 
195 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
196 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
197 	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
198 	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
199 	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
200 	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
201 	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
202 	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
203 	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
204 	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
205 	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
206 	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
207 	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
208 	dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
209 	/*
210 	 * Subtract 1 from the limit because we need to allocate a
211 	 * spare CQE so the HCA HW can tell the difference between an
212 	 * empty CQ and a full CQ.
213 	 */
214 	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
215 	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
216 	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
217 	dev->caps.mtts_per_seg	     = 1 << log_mtts_per_seg;
218 	dev->caps.reserved_mtts	     = DIV_ROUND_UP(dev_cap->reserved_mtts,
219 						    dev->caps.mtts_per_seg);
220 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
221 	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
222 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
223 	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
224 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
225 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
226 	dev->caps.flags		     = dev_cap->flags;
227 	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
228 	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
229 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
230 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
231 
232 	dev->caps.log_num_macs  = log_num_mac;
233 	dev->caps.log_num_vlans = log_num_vlan;
234 	dev->caps.log_num_prios = use_prio ? 3 : 0;
235 
236 	for (i = 1; i <= dev->caps.num_ports; ++i) {
237 		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
238 			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
239 		else
240 			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
241 		dev->caps.possible_type[i] = dev->caps.port_type[i];
242 		mlx4_priv(dev)->sense.sense_allowed[i] =
243 			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
244 
245 		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
246 			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
247 			mlx4_warn(dev, "Requested number of MACs is too much "
248 				  "for port %d, reducing to %d.\n",
249 				  i, 1 << dev->caps.log_num_macs);
250 		}
251 		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
252 			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
253 			mlx4_warn(dev, "Requested number of VLANs is too much "
254 				  "for port %d, reducing to %d.\n",
255 				  i, 1 << dev->caps.log_num_vlans);
256 		}
257 	}
258 
259 	mlx4_set_port_mask(dev);
260 
261 	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
262 
263 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
264 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
265 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
266 		(1 << dev->caps.log_num_macs) *
267 		(1 << dev->caps.log_num_vlans) *
268 		(1 << dev->caps.log_num_prios) *
269 		dev->caps.num_ports;
270 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
271 
272 	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
273 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
274 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
275 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
276 
277 	return 0;
278 }
279 
280 /*
281  * Change the port configuration of the device.
282  * Every user of this function must hold the port mutex.
283  */
284 int mlx4_change_port_types(struct mlx4_dev *dev,
285 			   enum mlx4_port_type *port_types)
286 {
287 	int err = 0;
288 	int change = 0;
289 	int port;
290 
291 	for (port = 0; port <  dev->caps.num_ports; port++) {
292 		/* Change the port type only if the new type is different
293 		 * from the current, and not set to Auto */
294 		if (port_types[port] != dev->caps.port_type[port + 1]) {
295 			change = 1;
296 			dev->caps.port_type[port + 1] = port_types[port];
297 		}
298 	}
299 	if (change) {
300 		mlx4_unregister_device(dev);
301 		for (port = 1; port <= dev->caps.num_ports; port++) {
302 			mlx4_CLOSE_PORT(dev, port);
303 			err = mlx4_SET_PORT(dev, port);
304 			if (err) {
305 				mlx4_err(dev, "Failed to set port %d, "
306 					      "aborting\n", port);
307 				goto out;
308 			}
309 		}
310 		mlx4_set_port_mask(dev);
311 		err = mlx4_register_device(dev);
312 	}
313 
314 out:
315 	return err;
316 }
317 
318 static ssize_t show_port_type(struct device *dev,
319 			      struct device_attribute *attr,
320 			      char *buf)
321 {
322 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
323 						   port_attr);
324 	struct mlx4_dev *mdev = info->dev;
325 	char type[8];
326 
327 	sprintf(type, "%s",
328 		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
329 		"ib" : "eth");
330 	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
331 		sprintf(buf, "auto (%s)\n", type);
332 	else
333 		sprintf(buf, "%s\n", type);
334 
335 	return strlen(buf);
336 }
337 
338 static ssize_t set_port_type(struct device *dev,
339 			     struct device_attribute *attr,
340 			     const char *buf, size_t count)
341 {
342 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
343 						   port_attr);
344 	struct mlx4_dev *mdev = info->dev;
345 	struct mlx4_priv *priv = mlx4_priv(mdev);
346 	enum mlx4_port_type types[MLX4_MAX_PORTS];
347 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
348 	int i;
349 	int err = 0;
350 
351 	if (!strcmp(buf, "ib\n"))
352 		info->tmp_type = MLX4_PORT_TYPE_IB;
353 	else if (!strcmp(buf, "eth\n"))
354 		info->tmp_type = MLX4_PORT_TYPE_ETH;
355 	else if (!strcmp(buf, "auto\n"))
356 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
357 	else {
358 		mlx4_err(mdev, "%s is not supported port type\n", buf);
359 		return -EINVAL;
360 	}
361 
362 	mlx4_stop_sense(mdev);
363 	mutex_lock(&priv->port_mutex);
364 	/* Possible type is always the one that was delivered */
365 	mdev->caps.possible_type[info->port] = info->tmp_type;
366 
367 	for (i = 0; i < mdev->caps.num_ports; i++) {
368 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
369 					mdev->caps.possible_type[i+1];
370 		if (types[i] == MLX4_PORT_TYPE_AUTO)
371 			types[i] = mdev->caps.port_type[i+1];
372 	}
373 
374 	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
375 		for (i = 1; i <= mdev->caps.num_ports; i++) {
376 			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
377 				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
378 				err = -EINVAL;
379 			}
380 		}
381 	}
382 	if (err) {
383 		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
384 			       "Set only 'eth' or 'ib' for both ports "
385 			       "(should be the same)\n");
386 		goto out;
387 	}
388 
389 	mlx4_do_sense_ports(mdev, new_types, types);
390 
391 	err = mlx4_check_port_params(mdev, new_types);
392 	if (err)
393 		goto out;
394 
	/* The configuration has been verified and is about to be
	 * applied, so there is no need to remember the temporary
	 * port types any more. */
398 	for (i = 0; i < mdev->caps.num_ports; i++)
399 		priv->port[i + 1].tmp_type = 0;
400 
401 	err = mlx4_change_port_types(mdev, new_types);
402 
403 out:
404 	mlx4_start_sense(mdev);
405 	mutex_unlock(&priv->port_mutex);
406 	return err ? err : count;
407 }
408 
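/*
 * Allocate ICM memory for the firmware area, hand it to the device
 * with MAP_FA and then start the firmware with RUN_FW.
 */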
409 static int mlx4_load_fw(struct mlx4_dev *dev)
410 {
411 	struct mlx4_priv *priv = mlx4_priv(dev);
412 	int err;
413 
414 	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
415 					 GFP_HIGHUSER | __GFP_NOWARN, 0);
416 	if (!priv->fw.fw_icm) {
417 		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
418 		return -ENOMEM;
419 	}
420 
421 	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
422 	if (err) {
423 		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
424 		goto err_free;
425 	}
426 
427 	err = mlx4_RUN_FW(dev);
428 	if (err) {
429 		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
430 		goto err_unmap_fa;
431 	}
432 
433 	return 0;
434 
435 err_unmap_fa:
436 	mlx4_UNMAP_FA(dev);
437 
438 err_free:
439 	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
440 	return err;
441 }
442 
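/*
 * Map the four cMPT sub-tables (QP, SRQ, CQ and EQ contexts).  Each
 * sub-table lives at a fixed offset of
 * (type * cmpt_entry_sz) << MLX4_CMPT_SHIFT from the cMPT base.
 */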
443 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
444 				int cmpt_entry_sz)
445 {
446 	struct mlx4_priv *priv = mlx4_priv(dev);
447 	int err;
448 
449 	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
450 				  cmpt_base +
451 				  ((u64) (MLX4_CMPT_TYPE_QP *
452 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
453 				  cmpt_entry_sz, dev->caps.num_qps,
454 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
455 				  0, 0);
456 	if (err)
457 		goto err;
458 
459 	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
460 				  cmpt_base +
461 				  ((u64) (MLX4_CMPT_TYPE_SRQ *
462 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
463 				  cmpt_entry_sz, dev->caps.num_srqs,
464 				  dev->caps.reserved_srqs, 0, 0);
465 	if (err)
466 		goto err_qp;
467 
468 	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
469 				  cmpt_base +
470 				  ((u64) (MLX4_CMPT_TYPE_CQ *
471 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
472 				  cmpt_entry_sz, dev->caps.num_cqs,
473 				  dev->caps.reserved_cqs, 0, 0);
474 	if (err)
475 		goto err_srq;
476 
477 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
478 				  cmpt_base +
479 				  ((u64) (MLX4_CMPT_TYPE_EQ *
480 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
481 				  cmpt_entry_sz,
482 				  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
483 	if (err)
484 		goto err_cq;
485 
486 	return 0;
487 
488 err_cq:
489 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
490 
491 err_srq:
492 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
493 
494 err_qp:
495 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
496 
497 err:
498 	return err;
499 }
500 
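/*
 * Size the ICM with SET_ICM_SIZE, map the auxiliary ICM area, and then
 * map every context table the firmware needs: cMPT, EQ, MTT, dMPT, QP,
 * AUXC, ALTC, RDMARC, CQ, SRQ and MCG.
 */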
501 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
502 			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
503 {
504 	struct mlx4_priv *priv = mlx4_priv(dev);
505 	u64 aux_pages;
506 	int err;
507 
508 	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
509 	if (err) {
510 		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
511 		return err;
512 	}
513 
514 	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
515 		 (unsigned long long) icm_size >> 10,
516 		 (unsigned long long) aux_pages << 2);
517 
518 	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
519 					  GFP_HIGHUSER | __GFP_NOWARN, 0);
520 	if (!priv->fw.aux_icm) {
521 		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
522 		return -ENOMEM;
523 	}
524 
525 	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
526 	if (err) {
527 		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
528 		goto err_free_aux;
529 	}
530 
531 	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
532 	if (err) {
533 		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
534 		goto err_unmap_aux;
535 	}
536 
537 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
538 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
539 				  dev->caps.num_eqs, dev->caps.num_eqs,
540 				  0, 0);
541 	if (err) {
542 		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
543 		goto err_unmap_cmpt;
544 	}
545 
546 	/*
547 	 * Reserved MTT entries must be aligned up to a cacheline
548 	 * boundary, since the FW will write to them, while the driver
549 	 * writes to all other MTT entries. (The variable
550 	 * dev->caps.mtt_entry_sz below is really the MTT segment
551 	 * size, not the raw entry size)
552 	 */
553 	dev->caps.reserved_mtts =
554 		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
555 		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
556 
557 	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
558 				  init_hca->mtt_base,
559 				  dev->caps.mtt_entry_sz,
560 				  dev->caps.num_mtt_segs,
561 				  dev->caps.reserved_mtts, 1, 0);
562 	if (err) {
563 		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
564 		goto err_unmap_eq;
565 	}
566 
567 	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
568 				  init_hca->dmpt_base,
569 				  dev_cap->dmpt_entry_sz,
570 				  dev->caps.num_mpts,
571 				  dev->caps.reserved_mrws, 1, 1);
572 	if (err) {
573 		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
574 		goto err_unmap_mtt;
575 	}
576 
577 	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
578 				  init_hca->qpc_base,
579 				  dev_cap->qpc_entry_sz,
580 				  dev->caps.num_qps,
581 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
582 				  0, 0);
583 	if (err) {
584 		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
585 		goto err_unmap_dmpt;
586 	}
587 
588 	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
589 				  init_hca->auxc_base,
590 				  dev_cap->aux_entry_sz,
591 				  dev->caps.num_qps,
592 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
593 				  0, 0);
594 	if (err) {
595 		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
596 		goto err_unmap_qp;
597 	}
598 
599 	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
600 				  init_hca->altc_base,
601 				  dev_cap->altc_entry_sz,
602 				  dev->caps.num_qps,
603 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
604 				  0, 0);
605 	if (err) {
606 		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
607 		goto err_unmap_auxc;
608 	}
609 
610 	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
611 				  init_hca->rdmarc_base,
612 				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
613 				  dev->caps.num_qps,
614 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
615 				  0, 0);
616 	if (err) {
617 		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
618 		goto err_unmap_altc;
619 	}
620 
621 	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
622 				  init_hca->cqc_base,
623 				  dev_cap->cqc_entry_sz,
624 				  dev->caps.num_cqs,
625 				  dev->caps.reserved_cqs, 0, 0);
626 	if (err) {
627 		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
628 		goto err_unmap_rdmarc;
629 	}
630 
631 	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
632 				  init_hca->srqc_base,
633 				  dev_cap->srq_entry_sz,
634 				  dev->caps.num_srqs,
635 				  dev->caps.reserved_srqs, 0, 0);
636 	if (err) {
637 		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
638 		goto err_unmap_cq;
639 	}
640 
641 	/*
642 	 * It's not strictly required, but for simplicity just map the
643 	 * whole multicast group table now.  The table isn't very big
644 	 * and it's a lot easier than trying to track ref counts.
645 	 */
646 	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
647 				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
648 				  dev->caps.num_mgms + dev->caps.num_amgms,
649 				  dev->caps.num_mgms + dev->caps.num_amgms,
650 				  0, 0);
651 	if (err) {
652 		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
653 		goto err_unmap_srq;
654 	}
655 
656 	return 0;
657 
658 err_unmap_srq:
659 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
660 
661 err_unmap_cq:
662 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
663 
664 err_unmap_rdmarc:
665 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
666 
667 err_unmap_altc:
668 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
669 
670 err_unmap_auxc:
671 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
672 
673 err_unmap_qp:
674 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
675 
676 err_unmap_dmpt:
677 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
678 
679 err_unmap_mtt:
680 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
681 
682 err_unmap_eq:
683 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
684 
685 err_unmap_cmpt:
686 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
687 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
688 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
689 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
690 
691 err_unmap_aux:
692 	mlx4_UNMAP_ICM_AUX(dev);
693 
694 err_free_aux:
695 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
696 
697 	return err;
698 }
699 
700 static void mlx4_free_icms(struct mlx4_dev *dev)
701 {
702 	struct mlx4_priv *priv = mlx4_priv(dev);
703 
704 	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
705 	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
706 	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
707 	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
708 	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
709 	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
710 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
711 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
712 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
713 	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
714 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
715 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
716 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
717 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
718 
719 	mlx4_UNMAP_ICM_AUX(dev);
720 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
721 }
722 
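/*
 * Create a write-combining mapping of the BlueFlame area, which
 * follows the UAR pages in BAR 2.
 */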
723 static int map_bf_area(struct mlx4_dev *dev)
724 {
725 	struct mlx4_priv *priv = mlx4_priv(dev);
726 	resource_size_t bf_start;
727 	resource_size_t bf_len;
728 	int err = 0;
729 
730 	bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
731 	bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
732 	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
733 	if (!priv->bf_mapping)
734 		err = -ENOMEM;
735 
736 	return err;
737 }
738 
739 static void unmap_bf_area(struct mlx4_dev *dev)
740 {
741 	if (mlx4_priv(dev)->bf_mapping)
742 		io_mapping_free(mlx4_priv(dev)->bf_mapping);
743 }
744 
745 static void mlx4_close_hca(struct mlx4_dev *dev)
746 {
747 	unmap_bf_area(dev);
748 	mlx4_CLOSE_HCA(dev, 0);
749 	mlx4_free_icms(dev);
750 	mlx4_UNMAP_FA(dev);
751 	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
752 }
753 
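/*
 * Bring the HCA up: query and start the firmware, size and map the
 * ICM, then issue INIT_HCA and QUERY_ADAPTER.  On failure, everything
 * done so far is unwound in reverse order.
 */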
754 static int mlx4_init_hca(struct mlx4_dev *dev)
755 {
756 	struct mlx4_priv	  *priv = mlx4_priv(dev);
757 	struct mlx4_adapter	   adapter;
758 	struct mlx4_dev_cap	   dev_cap;
759 	struct mlx4_mod_stat_cfg   mlx4_cfg;
760 	struct mlx4_profile	   profile;
761 	struct mlx4_init_hca_param init_hca;
762 	u64 icm_size;
763 	int err;
764 
765 	err = mlx4_QUERY_FW(dev);
766 	if (err) {
767 		if (err == -EACCES)
768 			mlx4_info(dev, "non-primary physical function, skipping.\n");
769 		else
770 			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
771 		return err;
772 	}
773 
774 	err = mlx4_load_fw(dev);
775 	if (err) {
776 		mlx4_err(dev, "Failed to start FW, aborting.\n");
777 		return err;
778 	}
779 
780 	mlx4_cfg.log_pg_sz_m = 1;
781 	mlx4_cfg.log_pg_sz = 0;
782 	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
783 	if (err)
784 		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
785 
786 	err = mlx4_dev_cap(dev, &dev_cap);
787 	if (err) {
788 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
789 		goto err_stop_fw;
790 	}
791 
792 	profile = default_profile;
793 
794 	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
795 	if ((long long) icm_size < 0) {
796 		err = icm_size;
797 		goto err_stop_fw;
798 	}
799 
800 	if (map_bf_area(dev))
801 		mlx4_dbg(dev, "Failed to map blue flame area\n");
802 
803 	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
804 
805 	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
806 	if (err)
807 		goto err_stop_fw;
808 
809 	err = mlx4_INIT_HCA(dev, &init_hca);
810 	if (err) {
811 		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
812 		goto err_free_icm;
813 	}
814 
815 	err = mlx4_QUERY_ADAPTER(dev, &adapter);
816 	if (err) {
817 		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
818 		goto err_close;
819 	}
820 
821 	priv->eq_table.inta_pin = adapter.inta_pin;
822 	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
823 
824 	return 0;
825 
826 err_close:
827 	mlx4_CLOSE_HCA(dev, 0);
828 
829 err_free_icm:
830 	mlx4_free_icms(dev);
831 
832 err_stop_fw:
833 	unmap_bf_area(dev);
834 	mlx4_UNMAP_FA(dev);
835 	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
836 
837 	return err;
838 }
839 
840 static int mlx4_init_counters_table(struct mlx4_dev *dev)
841 {
842 	struct mlx4_priv *priv = mlx4_priv(dev);
843 	int nent;
844 
845 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
846 		return -ENOENT;
847 
848 	nent = dev->caps.max_counters;
849 	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
850 }
851 
852 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
853 {
854 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
855 }
856 
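/*
 * Allocate a counter index.  Returns -ENOENT if the device does not
 * support counters and -ENOMEM if the counter bitmap is exhausted.
 */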
857 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
858 {
859 	struct mlx4_priv *priv = mlx4_priv(dev);
860 
861 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
862 		return -ENOENT;
863 
864 	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
865 	if (*idx == -1)
866 		return -ENOMEM;
867 
868 	return 0;
869 }
870 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
871 
872 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
873 {
874 	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
876 }
877 EXPORT_SYMBOL_GPL(mlx4_counter_free);
878 
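/*
 * Set up all software resources on top of the running firmware: UAR,
 * PD, MR, EQ, CQ, SRQ, QP, MCG and counter tables.  Switch the command
 * interface to event mode, verify interrupt delivery with a NOP
 * command and finally configure each port.
 */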
879 static int mlx4_setup_hca(struct mlx4_dev *dev)
880 {
881 	struct mlx4_priv *priv = mlx4_priv(dev);
882 	int err;
883 	int port;
884 	__be32 ib_port_default_caps;
885 
886 	err = mlx4_init_uar_table(dev);
887 	if (err) {
888 		mlx4_err(dev, "Failed to initialize "
889 			 "user access region table, aborting.\n");
890 		return err;
891 	}
892 
893 	err = mlx4_uar_alloc(dev, &priv->driver_uar);
894 	if (err) {
895 		mlx4_err(dev, "Failed to allocate driver access region, "
896 			 "aborting.\n");
897 		goto err_uar_table_free;
898 	}
899 
900 	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
901 	if (!priv->kar) {
902 		mlx4_err(dev, "Couldn't map kernel access region, "
903 			 "aborting.\n");
904 		err = -ENOMEM;
905 		goto err_uar_free;
906 	}
907 
908 	err = mlx4_init_pd_table(dev);
909 	if (err) {
910 		mlx4_err(dev, "Failed to initialize "
911 			 "protection domain table, aborting.\n");
912 		goto err_kar_unmap;
913 	}
914 
915 	err = mlx4_init_mr_table(dev);
916 	if (err) {
917 		mlx4_err(dev, "Failed to initialize "
918 			 "memory region table, aborting.\n");
919 		goto err_pd_table_free;
920 	}
921 
922 	err = mlx4_init_eq_table(dev);
923 	if (err) {
924 		mlx4_err(dev, "Failed to initialize "
925 			 "event queue table, aborting.\n");
926 		goto err_mr_table_free;
927 	}
928 
929 	err = mlx4_cmd_use_events(dev);
930 	if (err) {
931 		mlx4_err(dev, "Failed to switch to event-driven "
932 			 "firmware commands, aborting.\n");
933 		goto err_eq_table_free;
934 	}
935 
936 	err = mlx4_NOP(dev);
937 	if (err) {
938 		if (dev->flags & MLX4_FLAG_MSI_X) {
939 			mlx4_warn(dev, "NOP command failed to generate MSI-X "
940 				  "interrupt IRQ %d).\n",
941 				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
942 			mlx4_warn(dev, "Trying again without MSI-X.\n");
943 		} else {
944 			mlx4_err(dev, "NOP command failed to generate interrupt "
945 				 "(IRQ %d), aborting.\n",
946 				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
947 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
948 		}
949 
950 		goto err_cmd_poll;
951 	}
952 
953 	mlx4_dbg(dev, "NOP command IRQ test passed\n");
954 
955 	err = mlx4_init_cq_table(dev);
956 	if (err) {
957 		mlx4_err(dev, "Failed to initialize "
958 			 "completion queue table, aborting.\n");
959 		goto err_cmd_poll;
960 	}
961 
962 	err = mlx4_init_srq_table(dev);
963 	if (err) {
964 		mlx4_err(dev, "Failed to initialize "
965 			 "shared receive queue table, aborting.\n");
966 		goto err_cq_table_free;
967 	}
968 
969 	err = mlx4_init_qp_table(dev);
970 	if (err) {
971 		mlx4_err(dev, "Failed to initialize "
972 			 "queue pair table, aborting.\n");
973 		goto err_srq_table_free;
974 	}
975 
976 	err = mlx4_init_mcg_table(dev);
977 	if (err) {
978 		mlx4_err(dev, "Failed to initialize "
979 			 "multicast group table, aborting.\n");
980 		goto err_qp_table_free;
981 	}
982 
983 	err = mlx4_init_counters_table(dev);
984 	if (err && err != -ENOENT) {
985 		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
987 	}
988 
989 	for (port = 1; port <= dev->caps.num_ports; port++) {
990 		enum mlx4_port_type port_type = 0;
991 		mlx4_SENSE_PORT(dev, port, &port_type);
992 		if (port_type)
993 			dev->caps.port_type[port] = port_type;
994 		ib_port_default_caps = 0;
995 		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
996 		if (err)
997 			mlx4_warn(dev, "failed to get port %d default "
998 				  "ib capabilities (%d). Continuing with "
999 				  "caps = 0\n", port, err);
1000 		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1001 		err = mlx4_SET_PORT(dev, port);
1002 		if (err) {
1003 			mlx4_err(dev, "Failed to set port %d, aborting\n",
1004 				port);
			goto err_counters_table_free;
1006 		}
1007 	}
1008 	mlx4_set_port_mask(dev);
1009 
1010 	return 0;
1011 
err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);
1017 
1018 err_qp_table_free:
1019 	mlx4_cleanup_qp_table(dev);
1020 
1021 err_srq_table_free:
1022 	mlx4_cleanup_srq_table(dev);
1023 
1024 err_cq_table_free:
1025 	mlx4_cleanup_cq_table(dev);
1026 
1027 err_cmd_poll:
1028 	mlx4_cmd_use_polling(dev);
1029 
1030 err_eq_table_free:
1031 	mlx4_cleanup_eq_table(dev);
1032 
1033 err_mr_table_free:
1034 	mlx4_cleanup_mr_table(dev);
1035 
1036 err_pd_table_free:
1037 	mlx4_cleanup_pd_table(dev);
1038 
1039 err_kar_unmap:
1040 	iounmap(priv->kar);
1041 
1042 err_uar_free:
1043 	mlx4_uar_free(dev, &priv->driver_uar);
1044 
1045 err_uar_table_free:
1046 	mlx4_cleanup_uar_table(dev);
1047 	return err;
1048 }
1049 
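/*
 * Try to enable MSI-X with roughly one completion vector per online
 * CPU per port (plus the legacy vectors), bounded by the number of
 * EQs the device supports.  If fewer vectors are granted, retry with
 * what is available; if MSI-X cannot be enabled at all, fall back to
 * a single shared legacy interrupt.
 */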
1050 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1051 {
1052 	struct mlx4_priv *priv = mlx4_priv(dev);
1053 	struct msix_entry *entries;
1054 	int nreq = min_t(int, dev->caps.num_ports *
1055 			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
1056 				+ MSIX_LEGACY_SZ, MAX_MSIX);
1057 	int err;
1058 	int i;
1059 
1060 	if (msi_x) {
1061 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
1062 			     nreq);
1063 		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
1064 		if (!entries)
1065 			goto no_msi;
1066 
1067 		for (i = 0; i < nreq; ++i)
1068 			entries[i].entry = i;
1069 
1070 	retry:
1071 		err = pci_enable_msix(dev->pdev, entries, nreq);
1072 		if (err) {
1073 			/* Try again if at least 2 vectors are available */
1074 			if (err > 1) {
1075 				mlx4_info(dev, "Requested %d vectors, "
1076 					  "but only %d MSI-X vectors available, "
1077 					  "trying again\n", nreq, err);
1078 				nreq = err;
1079 				goto retry;
1080 			}
1081 			kfree(entries);
1082 			goto no_msi;
1083 		}
1084 
1085 		if (nreq <
1086 		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
1088 			dev->caps.comp_pool           = 0;
1089 			dev->caps.num_comp_vectors = nreq - 1;
1090 		} else {
1091 			dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
1092 			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
1093 		}
1094 		for (i = 0; i < nreq; ++i)
1095 			priv->eq_table.eq[i].irq = entries[i].vector;
1096 
1097 		dev->flags |= MLX4_FLAG_MSI_X;
1098 
1099 		kfree(entries);
1100 		return;
1101 	}
1102 
1103 no_msi:
1104 	dev->caps.num_comp_vectors = 1;
1105 	dev->caps.comp_pool	   = 0;
1106 
1107 	for (i = 0; i < 2; ++i)
1108 		priv->eq_table.eq[i].irq = dev->pdev->irq;
1109 }
1110 
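/*
 * Initialize the per-port MAC and VLAN tables, compute the port's
 * base QPN in the ETH_ADDR region and create the "mlx4_portN" sysfs
 * attribute used to show and change the port type.
 */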
1111 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
1112 {
1113 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
1114 	int err = 0;
1115 
1116 	info->dev = dev;
1117 	info->port = port;
1118 	mlx4_init_mac_table(dev, &info->mac_table);
1119 	mlx4_init_vlan_table(dev, &info->vlan_table);
1120 	info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
1121 			(port - 1) * (1 << log_num_mac);
1122 
1123 	sprintf(info->dev_name, "mlx4_port%d", port);
1124 	info->port_attr.attr.name = info->dev_name;
1125 	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
1126 	info->port_attr.show      = show_port_type;
1127 	info->port_attr.store     = set_port_type;
1128 	sysfs_attr_init(&info->port_attr.attr);
1129 
1130 	err = device_create_file(&dev->pdev->dev, &info->port_attr);
1131 	if (err) {
1132 		mlx4_err(dev, "Failed to create file for port %d\n", port);
1133 		info->port = -1;
1134 	}
1135 
1136 	return err;
1137 }
1138 
1139 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
1140 {
1141 	if (info->port < 0)
1142 		return;
1143 
1144 	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1145 }
1146 
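/*
 * Allocate the per-port steering state and initialize the promiscuous
 * QP and steering-entry lists for each steering type.
 */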
1147 static int mlx4_init_steering(struct mlx4_dev *dev)
1148 {
1149 	struct mlx4_priv *priv = mlx4_priv(dev);
1150 	int num_entries = dev->caps.num_ports;
1151 	int i, j;
1152 
	priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
			      GFP_KERNEL);
1154 	if (!priv->steer)
1155 		return -ENOMEM;
1156 
1157 	for (i = 0; i < num_entries; i++) {
1158 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
1159 			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
1160 			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
1161 		}
1162 		INIT_LIST_HEAD(&priv->steer[i].high_prios);
1163 	}
1164 	return 0;
1165 }
1166 
1167 static void mlx4_clear_steering(struct mlx4_dev *dev)
1168 {
1169 	struct mlx4_priv *priv = mlx4_priv(dev);
1170 	struct mlx4_steer_index *entry, *tmp_entry;
1171 	struct mlx4_promisc_qp *pqp, *tmp_pqp;
1172 	int num_entries = dev->caps.num_ports;
1173 	int i, j;
1174 
1175 	for (i = 0; i < num_entries; i++) {
1176 		for (j = 0; j < MLX4_NUM_STEERS; j++) {
1177 			list_for_each_entry_safe(pqp, tmp_pqp,
1178 						 &priv->steer[i].promisc_qps[j],
1179 						 list) {
1180 				list_del(&pqp->list);
1181 				kfree(pqp);
1182 			}
1183 			list_for_each_entry_safe(entry, tmp_entry,
1184 						 &priv->steer[i].steer_entries[j],
1185 						 list) {
1186 				list_del(&entry->list);
1187 				list_for_each_entry_safe(pqp, tmp_pqp,
1188 							 &entry->duplicates,
1189 							 list) {
1190 					list_del(&pqp->list);
1191 					kfree(pqp);
1192 				}
1193 				kfree(entry);
1194 			}
1195 		}
1196 	}
1197 	kfree(priv->steer);
1198 }
1199 
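/*
 * Main probe path: enable and map the PCI device, reset the HCA,
 * initialize the command interface, firmware and ICM, enable MSI-X if
 * possible, set up all resource tables and register the device with
 * the mlx4 interface layer.
 */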
1200 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1201 {
1202 	struct mlx4_priv *priv;
1203 	struct mlx4_dev *dev;
1204 	int err;
1205 	int port;
1206 
1207 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
1208 
1209 	err = pci_enable_device(pdev);
1210 	if (err) {
1211 		dev_err(&pdev->dev, "Cannot enable PCI device, "
1212 			"aborting.\n");
1213 		return err;
1214 	}
1215 
1216 	/*
1217 	 * Check for BARs.  We expect 0: 1MB
1218 	 */
1219 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
1220 	    pci_resource_len(pdev, 0) != 1 << 20) {
1221 		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
1222 		err = -ENODEV;
1223 		goto err_disable_pdev;
1224 	}
1225 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
1226 		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
1227 		err = -ENODEV;
1228 		goto err_disable_pdev;
1229 	}
1230 
1231 	err = pci_request_regions(pdev, DRV_NAME);
1232 	if (err) {
1233 		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
1234 		goto err_disable_pdev;
1235 	}
1236 
1237 	pci_set_master(pdev);
1238 
1239 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1240 	if (err) {
1241 		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
1242 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1243 		if (err) {
1244 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
1245 			goto err_release_regions;
1246 		}
1247 	}
1248 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1249 	if (err) {
1250 		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
1251 			 "consistent PCI DMA mask.\n");
1252 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1253 		if (err) {
1254 			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
1255 				"aborting.\n");
1256 			goto err_release_regions;
1257 		}
1258 	}
1259 
1260 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
1261 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
1262 
1263 	priv = kzalloc(sizeof *priv, GFP_KERNEL);
1264 	if (!priv) {
1265 		dev_err(&pdev->dev, "Device struct alloc failed, "
1266 			"aborting.\n");
1267 		err = -ENOMEM;
1268 		goto err_release_regions;
1269 	}
1270 
1271 	dev       = &priv->dev;
1272 	dev->pdev = pdev;
1273 	INIT_LIST_HEAD(&priv->ctx_list);
1274 	spin_lock_init(&priv->ctx_lock);
1275 
1276 	mutex_init(&priv->port_mutex);
1277 
1278 	INIT_LIST_HEAD(&priv->pgdir_list);
1279 	mutex_init(&priv->pgdir_mutex);
1280 
1281 	INIT_LIST_HEAD(&priv->bf_list);
1282 	mutex_init(&priv->bf_mutex);
1283 
1284 	dev->rev_id = pdev->revision;
1285 
1286 	/*
1287 	 * Now reset the HCA before we touch the PCI capabilities or
1288 	 * attempt a firmware command, since a boot ROM may have left
1289 	 * the HCA in an undefined state.
1290 	 */
1291 	err = mlx4_reset(dev);
1292 	if (err) {
1293 		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
1294 		goto err_free_dev;
1295 	}
1296 
1297 	if (mlx4_cmd_init(dev)) {
1298 		mlx4_err(dev, "Failed to init command interface, aborting.\n");
1299 		goto err_free_dev;
1300 	}
1301 
1302 	err = mlx4_init_hca(dev);
1303 	if (err)
1304 		goto err_cmd;
1305 
1306 	err = mlx4_alloc_eq_table(dev);
1307 	if (err)
1308 		goto err_close;
1309 
1310 	priv->msix_ctl.pool_bm = 0;
1311 	spin_lock_init(&priv->msix_ctl.pool_lock);
1312 
1313 	mlx4_enable_msi_x(dev);
1314 
1315 	err = mlx4_init_steering(dev);
1316 	if (err)
1317 		goto err_free_eq;
1318 
1319 	err = mlx4_setup_hca(dev);
1320 	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
1321 		dev->flags &= ~MLX4_FLAG_MSI_X;
1322 		pci_disable_msix(pdev);
1323 		err = mlx4_setup_hca(dev);
1324 	}
1325 
1326 	if (err)
1327 		goto err_steer;
1328 
1329 	for (port = 1; port <= dev->caps.num_ports; port++) {
1330 		err = mlx4_init_port_info(dev, port);
1331 		if (err)
1332 			goto err_port;
1333 	}
1334 
1335 	err = mlx4_register_device(dev);
1336 	if (err)
1337 		goto err_port;
1338 
1339 	mlx4_sense_init(dev);
1340 	mlx4_start_sense(dev);
1341 
1342 	pci_set_drvdata(pdev, dev);
1343 
1344 	return 0;
1345 
1346 err_port:
1347 	for (--port; port >= 1; --port)
1348 		mlx4_cleanup_port_info(&priv->port[port]);
1349 
1350 	mlx4_cleanup_counters_table(dev);
1351 	mlx4_cleanup_mcg_table(dev);
1352 	mlx4_cleanup_qp_table(dev);
1353 	mlx4_cleanup_srq_table(dev);
1354 	mlx4_cleanup_cq_table(dev);
1355 	mlx4_cmd_use_polling(dev);
1356 	mlx4_cleanup_eq_table(dev);
1357 	mlx4_cleanup_mr_table(dev);
1358 	mlx4_cleanup_pd_table(dev);
1359 	mlx4_cleanup_uar_table(dev);
1360 
1361 err_steer:
1362 	mlx4_clear_steering(dev);
1363 
1364 err_free_eq:
1365 	mlx4_free_eq_table(dev);
1366 
1367 err_close:
1368 	if (dev->flags & MLX4_FLAG_MSI_X)
1369 		pci_disable_msix(pdev);
1370 
1371 	mlx4_close_hca(dev);
1372 
1373 err_cmd:
1374 	mlx4_cmd_cleanup(dev);
1375 
1376 err_free_dev:
1377 	kfree(priv);
1378 
1379 err_release_regions:
1380 	pci_release_regions(pdev);
1381 
1382 err_disable_pdev:
1383 	pci_disable_device(pdev);
1384 	pci_set_drvdata(pdev, NULL);
1385 	return err;
1386 }
1387 
1388 static int __devinit mlx4_init_one(struct pci_dev *pdev,
1389 				   const struct pci_device_id *id)
1390 {
1391 	printk_once(KERN_INFO "%s", mlx4_version);
1392 
1393 	return __mlx4_init_one(pdev, id);
1394 }
1395 
1396 static void mlx4_remove_one(struct pci_dev *pdev)
1397 {
1398 	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
1399 	struct mlx4_priv *priv = mlx4_priv(dev);
1400 	int p;
1401 
1402 	if (dev) {
1403 		mlx4_stop_sense(dev);
1404 		mlx4_unregister_device(dev);
1405 
1406 		for (p = 1; p <= dev->caps.num_ports; p++) {
1407 			mlx4_cleanup_port_info(&priv->port[p]);
1408 			mlx4_CLOSE_PORT(dev, p);
1409 		}
1410 
1411 		mlx4_cleanup_counters_table(dev);
1412 		mlx4_cleanup_mcg_table(dev);
1413 		mlx4_cleanup_qp_table(dev);
1414 		mlx4_cleanup_srq_table(dev);
1415 		mlx4_cleanup_cq_table(dev);
1416 		mlx4_cmd_use_polling(dev);
1417 		mlx4_cleanup_eq_table(dev);
1418 		mlx4_cleanup_mr_table(dev);
1419 		mlx4_cleanup_pd_table(dev);
1420 
1421 		iounmap(priv->kar);
1422 		mlx4_uar_free(dev, &priv->driver_uar);
1423 		mlx4_cleanup_uar_table(dev);
1424 		mlx4_clear_steering(dev);
1425 		mlx4_free_eq_table(dev);
1426 		mlx4_close_hca(dev);
1427 		mlx4_cmd_cleanup(dev);
1428 
1429 		if (dev->flags & MLX4_FLAG_MSI_X)
1430 			pci_disable_msix(pdev);
1431 
1432 		kfree(priv);
1433 		pci_release_regions(pdev);
1434 		pci_disable_device(pdev);
1435 		pci_set_drvdata(pdev, NULL);
1436 	}
1437 }
1438 
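/* Tear the device down completely and probe it again from scratch. */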
1439 int mlx4_restart_one(struct pci_dev *pdev)
1440 {
1441 	mlx4_remove_one(pdev);
1442 	return __mlx4_init_one(pdev, NULL);
1443 }
1444 
1445 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1446 	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
1447 	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
1448 	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
1449 	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
1450 	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
1451 	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
1452 	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
1453 	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1454 	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1455 	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1456 	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1457 	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1458 	{ PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
1459 	{ PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
1460 	{ PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
1461 	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
1462 	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
1463 	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
1464 	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
1465 	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
1466 	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
1467 	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
1468 	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
1469 	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
1470 	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
1471 	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
1472 	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
1473 	{ 0, }
1474 };
1475 
1476 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
1477 
1478 static struct pci_driver mlx4_driver = {
1479 	.name		= DRV_NAME,
1480 	.id_table	= mlx4_pci_table,
1481 	.probe		= mlx4_init_one,
1482 	.remove		= __devexit_p(mlx4_remove_one)
1483 };
1484 
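/* Sanity-check module parameters before registering the PCI driver. */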
1485 static int __init mlx4_verify_params(void)
1486 {
1487 	if ((log_num_mac < 0) || (log_num_mac > 7)) {
1488 		pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
1489 		return -1;
1490 	}
1491 
1492 	if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
1493 		pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);
1494 		return -1;
1495 	}
1496 
1497 	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
1498 		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1499 		return -1;
1500 	}
1501 
1502 	return 0;
1503 }
1504 
1505 static int __init mlx4_init(void)
1506 {
1507 	int ret;
1508 
1509 	if (mlx4_verify_params())
1510 		return -EINVAL;
1511 
1512 	mlx4_catas_init();
1513 
1514 	mlx4_wq = create_singlethread_workqueue("mlx4");
1515 	if (!mlx4_wq)
1516 		return -ENOMEM;
1517 
1518 	ret = pci_register_driver(&mlx4_driver);
1519 	return ret < 0 ? ret : 0;
1520 }
1521 
1522 static void __exit mlx4_cleanup(void)
1523 {
1524 	pci_unregister_driver(&mlx4_driver);
1525 	destroy_workqueue(mlx4_wq);
1526 }
1527 
1528 module_init(mlx4_init);
1529 module_exit(mlx4_cleanup);
1530